Posted to commits@ambari.apache.org by ma...@apache.org on 2014/01/17 20:49:16 UTC

[01/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Updated Branches:
  refs/heads/trunk 186d6a7fc -> 92583535d


http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index 22c3eb8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.5.1.3.3.0</version>
-      <components>
-
-        <component>
-          <name>ZOOKEEPER_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZOOKEEPER_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/zookeeper_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>zookeeper</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkEnv.sh
deleted file mode 100644
index 07017e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkServer.sh
deleted file mode 100644
index 49ceb4d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this scripted is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   accord to the docs they are not necessary, but otw jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkService.sh
deleted file mode 100644
index 32dfce4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkSmoke.sh
deleted file mode 100644
index c1c11b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/files/zkSmoke.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smoke_script=$1
-smoke_user=$2
-conf_dir=$3
-client_port=$4
-security_enabled=$5
-kinit_path_local=$6
-smoke_user_keytab=$7
-export ZOOKEEPER_EXIT_CODE=0
-test_output_file=/tmp/zkSmoke.out
-errors_expr="ERROR|Exception"
-acceptable_expr="SecurityException"
-zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
-  su - $smoke_user -c "$kinitcmd"
-fi
-
-function verify_output() {
-  if [ -f $test_output_file ]; then
-    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
-    if [ "$?" -eq 0 ]; then
-      echo "Error found in the zookeeper smoke test. Exiting."
-      echo $errors
-      exit 1
-    fi
-  fi
-}
-
-# Delete /zk_smoketest znode if exists
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
-# Create /zk_smoketest znode on one zookeeper server
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
-verify_output
-
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
-  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/params.py
deleted file mode 100644
index 9acc0c9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/params.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-config_dir = "/etc/zookeeper/conf"
-zk_user =  config['configurations']['global']['zk_user']
-hostname = config['hostname']
-zk_bin = '/usr/lib/zookeeper/bin'
-user_group = config['configurations']['global']['user_group']
-
-smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-
-zk_log_dir = config['configurations']['global']['zk_log_dir']
-zk_data_dir = config['configurations']['global']['zk_data_dir']
-zk_pid_dir = status_params.zk_pid_dir
-zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize = "-Xmx1024m"
-
-tickTime = config['configurations']['global']['tickTime']
-initLimit = config['configurations']['global']['initLimit']
-syncLimit = config['configurations']['global']['syncLimit']
-clientPort = config['configurations']['global']['clientPort']
-
-if 'zoo.cfg' in config['configurations']:
-  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
-else:
-  zoo_cfg_properties_map = {}
-zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
-
-zk_primary_name = "zookeeper"
-zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
-zk_principal = zk_principal_name.replace('_HOST',hostname)
-
-java64_home = config['hostLevelParams']['java_home']
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-
-keytab_path = "/etc/security/keytabs"
-zk_keytab_path = format("{keytab_path}/zk.service.keytab")
-zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
-zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-security_enabled = config['configurations']['global']['security_enabled']
-
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

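The params.py module above resolves every value the other scripts need from Script.get_config() and exposes them as module-level names; strings such as zk_keytab_path are then built by substituting those names into {placeholder} templates. The following is a minimal, self-contained sketch of that lookup-and-interpolate pattern in plain Python; the interpolate() helper is a hypothetical stand-in for the stack's format() function, and the configuration values are illustrative, not taken from a real cluster.

    import re

    # Hypothetical stand-in for the stack's format() helper: substitute
    # {name} placeholders from a dictionary of already-resolved parameters.
    def interpolate(template, params):
        return re.sub(r"\{(\w+)\}", lambda m: str(params[m.group(1)]), template)

    # Trimmed-down view of the configuration the agent-side scripts receive.
    config = {
        "configurations": {"global": {"zk_user": "zookeeper"}},
    }

    params = {
        "zk_user": config["configurations"]["global"]["zk_user"],
        "config_dir": "/etc/zookeeper/conf",
        "keytab_path": "/etc/security/keytabs",
    }
    params["zk_keytab_path"] = interpolate("{keytab_path}/zk.service.keytab", params)
    params["zk_server_jaas_file"] = interpolate("{config_dir}/zookeeper_jaas.conf", params)

    print(params["zk_keytab_path"])       # /etc/security/keytabs/zk.service.keytab
    print(params["zk_server_jaas_file"])  # /etc/zookeeper/conf/zookeeper_jaas.conf
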
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/service_check.py
deleted file mode 100644
index 6b3553d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/service_check.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ZookeeperServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File("/tmp/zkSmoke.sh",
-         mode=0755,
-         content=StaticFile('zkSmoke.sh')
-    )
-
-    cmd_qourum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
-                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
-                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd_qourum,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-if __name__ == "__main__":
-  ZookeeperServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/status_params.py
deleted file mode 100644
index 98f2903..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-zk_pid_dir = config['configurations']['global']['zk_pid_dir']
-zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper.py
deleted file mode 100644
index c49eb22..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-
-def zookeeper(type = None):
-  import params
-
-  Directory(params.config_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  configFile("zoo.cfg", template_name="zoo.cfg.j2")
-  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
-  configFile("configuration.xsl", template_name="configuration.xsl.j2")
-
-  Directory(params.zk_pid_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_log_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_data_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  if type == 'server':
-    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
-
-    File(format("{zk_data_dir}/myid"),
-         mode = 0644,
-         content = myid
-    )
-
-  configFile("log4j.properties", template_name="log4j.properties.j2")
-
-  if params.security_enabled:
-    if type == "server":
-      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-    else:
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-
-  File(format("{config_dir}/zoo_sample.cfg"),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-def configFile(name, template_name=None):
-  import params
-
-  File(format("{config_dir}/{name}"),
-       content=Template(template_name),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_client.py
deleted file mode 100644
index 028a37d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-
-class ZookeeperClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    zookeeper(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_server.py
deleted file mode 100644
index e8cc264..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-from zookeeper_service import zookeeper_service
-
-class ZookeeperServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    zookeeper(type='server')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    zookeeper_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    zookeeper_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.zk_pid_file)
-
-if __name__ == "__main__":
-  ZookeeperServer().execute()
\ No newline at end of file

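zookeeper_server.py, zookeeper_client.py and service_check.py all follow the same shape: a Script subclass whose method names match the commands Ambari issues (install, configure, start, stop, status), with execute() dispatching to the matching method. The sketch below is a rough, self-contained approximation of that dispatch idea only; it is not the resource_management implementation, and the printed actions merely stand in for the real resources.

    import sys

    class Script(object):
        """Toy base class: dispatch the command named on the command line
        to the method of the same name on the subclass."""
        def execute(self):
            command = sys.argv[1] if len(sys.argv) > 1 else "status"
            env = {}                      # stands in for the real environment object
            getattr(self, command)(env)   # e.g. "start" -> self.start(env)

    class ZookeeperServer(Script):
        def install(self, env):
            print("installing the zookeeper package")
        def configure(self, env):
            print("writing zoo.cfg, zookeeper-env.sh and myid")
        def start(self, env):
            self.configure(env)
            print("zkServer.sh start")
        def stop(self, env):
            print("zkServer.sh stop")
        def status(self, env):
            print("checking the pid file")

    if __name__ == "__main__":
        ZookeeperServer().execute()   # e.g. python this_sketch.py start
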
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_service.py
deleted file mode 100644
index 83b8f08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/scripts/zookeeper_service.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def zookeeper_service(action='start'):
-  import params
-
-  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
-
-  if action == 'start':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
-    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.zk_user
-    )
-  elif action == 'stop':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
-    rm_pid = format("rm -f {zk_pid_file}")
-    Execute(daemon_cmd,
-            user=params.zk_user
-    )
-    Execute(rm_pid)
\ No newline at end of file

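zookeeper_service() above guards the start command with a not_if test, so a second start is a no-op while the pid file still points at a live process. A plain-Python approximation of that guard is sketched below; the pid-file location is an assumed example, the command strings mirror the ones built in the script, and subprocess.call is used only to illustrate the behaviour of the Execute resource, not to reproduce it.

    import subprocess

    zk_pid_file = "/var/run/zookeeper/zookeeper_server.pid"   # assumed location
    no_op_test = ("ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1"
                  % (zk_pid_file, zk_pid_file))
    daemon_cmd = ("source /etc/zookeeper/conf/zookeeper-env.sh ; "
                  "env ZOOCFGDIR=/etc/zookeeper/conf ZOOCFG=zoo.cfg "
                  "/usr/lib/zookeeper/bin/zkServer.sh start")

    # Execute(daemon_cmd, not_if=no_op_test) behaves roughly like:
    if subprocess.call(no_op_test, shell=True) != 0:
        # guard failed, i.e. no live server: run the start command
        subprocess.call(daemon_cmd, shell=True, executable="/bin/bash")
    else:
        print("zookeeper already running, skipping start")
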
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/configuration.xsl.j2
deleted file mode 100644
index ca498b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/configuration.xsl.j2
+++ /dev/null
@@ -1,37 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed under the Apache License, Version 2.0 (the "License");
-  you may not use this file except in compliance with the License.
-  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License. See accompanying LICENSE file.
--->
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/log4j.properties.j2
deleted file mode 100644
index db69564..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/log4j.properties.j2
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# Format is "<default threshold> (, <appender>)+
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=zookeeper.log
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=zookeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zoo.cfg.j2
deleted file mode 100644
index 5b68218..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zoo.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The number of milliseconds of each tick
-tickTime={{tickTime}}
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit={{initLimit}}
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit={{syncLimit}}
-# the directory where the snapshot is stored.
-dataDir={{zk_data_dir}}
-# the port at which the clients will connect
-clientPort={{clientPort}}
-{% for host in zookeeper_hosts %}
-server.{{loop.index}}={{host}}:2888:3888
-{% endfor %}
-
-{% if security_enabled %}
-authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-jaasLoginRenew=3600000
-kerberos.removeHostFromPrincipal=true
-kerberos.removeRealmFromPrincipal=true
-{% endif %}
-
-{% if zoo_cfg_properties_map_length > 0 %}
-# Custom properties
-{% endif %}
-{% for key, value in zoo_cfg_properties_map.iteritems() %}
-{{key}}={{value}}
-{% endfor %}

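The server.{{loop.index}} lines rendered from zoo.cfg.j2 and the myid file written by zookeeper.py have to agree: params.py sorts zookeeper_hosts, the template numbers them from 1, and zookeeper.py writes sorted-index-plus-one as myid. A short Python sketch of that correspondence, using hypothetical host names:

    # Hypothetical host list; the real values come from clusterHostInfo.
    zookeeper_hosts = sorted(["zk2.example.com", "zk1.example.com", "zk3.example.com"])

    # What the template's for-loop produces (loop.index is 1-based):
    for i, host in enumerate(zookeeper_hosts, start=1):
        print("server.%d=%s:2888:3888" % (i, host))

    # What zookeeper.py writes to {zk_data_dir}/myid on a given server:
    hostname = "zk2.example.com"
    myid = str(sorted(zookeeper_hosts).index(hostname) + 1)
    print("myid on %s -> %s" % (hostname, myid))   # 2, matching server.2 above
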
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
deleted file mode 100644
index 493a2a4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-export JAVA_HOME={{java64_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
deleted file mode 100644
index c70449d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
deleted file mode 100644
index 639cdaa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{zk_keytab_path}}"
-principal="{{zk_principal}}";
-};


[11/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
index 42bee82..a4c500d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/metainfo.xml
@@ -16,19 +16,91 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Nagios Monitoring and Alerting system</comment>
-    <version>3.5.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>NAGIOS</name>
+      <comment>Nagios Monitoring and Alerting system</comment>
+      <version>3.5.0</version>
+      <components>
         <component>
             <name>NAGIOS_SERVER</name>
             <category>MASTER</category>
+            <cardinality>1</cardinality>
+            <commandScript>
+              <script>scripts/nagios_server.py</script>
+              <scriptType>PYTHON</scriptType>
+              <timeout>600</timeout>
+            </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>perl</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>perl-Net-SNMP</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-plugins-1.4.9</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-www-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>fping</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hdp_mon_nagios_addons</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <package>
+            <type>rpm</type>
+            <name>php5-json</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <package>
+            <type>rpm</type>
+            <name>php-pecl-json.x86_64</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>redhat5</osType>
+          <package>
+            <type>rpm</type>
+            <name>php-pecl-json.x86_64</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>oraclelinux5</osType>
+          <package>
+            <type>rpm</type>
+            <name>php-pecl-json.x86_64</name>
+          </package>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
 </metainfo>

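The rewritten metainfo.xml files in this part all follow the schemaVersion 2.0 layout: a services/service element with a name, components carrying a cardinality and a commandScript, and osSpecifics listing packages. As a small illustration of that structure, the sketch below walks a trimmed example document with the standard-library ElementTree; the example XML is a cut-down fragment in the same shape, not the full NAGIOS entry.

    import xml.etree.ElementTree as ET

    metainfo = """
    <metainfo>
      <schemaVersion>2.0</schemaVersion>
      <services>
        <service>
          <name>NAGIOS</name>
          <components>
            <component>
              <name>NAGIOS_SERVER</name>
              <category>MASTER</category>
              <cardinality>1</cardinality>
              <commandScript>
                <script>scripts/nagios_server.py</script>
                <scriptType>PYTHON</scriptType>
              </commandScript>
            </component>
          </components>
        </service>
      </services>
    </metainfo>
    """

    root = ET.fromstring(metainfo)
    for component in root.iter("component"):
        print(component.findtext("name"),
              component.findtext("category"),
              component.findtext("cardinality"),
              component.findtext("commandScript/scriptType"))
    # -> NAGIOS_SERVER MASTER 1 PYTHON
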
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml
index e4af208..487104d 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/metainfo.xml
@@ -16,23 +16,98 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.</comment>
-    <version>3.3.2.1.3.3.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>3.3.2.1.3.3.0</version>
+      <components>
         <component>
-            <name>OOZIE_SERVER</name>
-            <category>MASTER</category>
+          <name>OOZIE_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>OOZIE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>OOZIE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>oozie-site</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>oozie.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>oozie-client.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>extjs-2.2-1</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml
index d29d56d..9fb2c06 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/metainfo.xml
@@ -16,15 +16,46 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Scripting platform for analyzing large datasets</comment>
-    <version>0.11.1.1.3.3.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.11.1.1.3.3.0</version>
+      <components>
         <component>
-            <name>PIG</name>
-            <category>CLIENT</category>
+          <name>PIG</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
 
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml
index ccf40b4..426bb25 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/metainfo.xml
@@ -16,15 +16,62 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Tool for transferring bulk data between Apache Hadoop and structured data stores such as relational databases</comment>
-    <version>1.4.3.1.3.3.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.3.1.3.3.0</version>
 
-    <components>
+      <components>
         <component>
-            <name>SQOOP</name>
-            <category>CLIENT</category>
+          <name>SQOOP</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>sqoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml
index d9ebebc..d6c2a1f 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/metainfo.xml
@@ -16,21 +16,82 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>This is comment for WEBHCAT service</comment>
-    <version>0.11.0.1.3.3.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>WEBHCAT</name>
+      <comment>This is comment for WEBHCAT service</comment>
+      <version>0.11.0.1.3.3.0</version>
+      <components>
         <component>
-            <name>WEBHCAT_SERVER</name>
-            <category>MASTER</category>
+          <name>WEBHCAT_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>WEBHCAT/WEBHCAT_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-    <config-type>webhcat-site</config-type>
-  </configuration-dependencies>
-
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <configuration-dependencies>
+        <config-type>webhcat-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml
index d1c2796..22c3eb8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/metainfo.xml
@@ -16,24 +16,57 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Centralized service which provides highly reliable distributed coordination</comment>
-    <version>3.4.5.1.3.3.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.5.1.3.3.0</version>
+      <components>
 
-    <components>
         <component>
-            <name>ZOOKEEPER_SERVER</name>
-            <category>MASTER</category>
+          <name>ZOOKEEPER_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>ZOOKEEPER_CLIENT</name>
-            <category>CLIENT</category>
+          <name>ZOOKEEPER_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
+      </components>
 
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
 
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>
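
Every service in the 1.3.3 stack now also declares a service-level commandScript pointing at scripts/service_check.py with a 300 second timeout. Those smoke-test scripts are not shown in this hunk; below is a minimal sketch, assuming the SERVICE_CHECK command is dispatched to a method of the same name just as install/configure are, and using a hypothetical zkCli.sh invocation purely for illustration:

from resource_management import *

# Illustrative smoke test in the shape of scripts/service_check.py (not the committed file).
class ZookeeperServiceCheck(Script):
  def service_check(self, env):
    import params
    env.set_params(params)
    # hypothetical check: list the ZooKeeper root via the CLI, retrying a few times
    Execute("echo 'ls /' | /usr/lib/zookeeper/bin/zkCli.sh",
            tries=3,
            try_sleep=5,
            logoutput=True)

if __name__ == "__main__":
  ZookeeperServiceCheck().execute()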

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/files/changeToSecureUid.sh
deleted file mode 100644
index 4872a10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index 51e5cd2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-##!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_users()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()
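
This deleted hook shows the other half of the scripting model: where service commandScripts get one method per command, a Hook funnels everything through a single hook(env) entry point reached via execute(). A toy sketch of the two dispatch styles, for orientation only (this is not the resource_management implementation):

# Toy illustration only -- not the resource_management implementation.
class ToyScript(object):
  def execute(self, command, env=None):
    # service scripts: the server-sent command selects the method (install, configure, ...)
    getattr(self, command.lower())(env)

class ToyHook(ToyScript):
  def execute(self, command, env=None):
    # hooks: always funnel into the single hook() entry point
    self.hook(env)

class BeforeInstallHook(ToyHook):
  def hook(self, env):
    print("before-INSTALL: setup_users(); install_packages()")

BeforeInstallHook().execute("INSTALL")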

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index fa19ca3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,81 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-zk_user = config['configurations']['global']['zk_user']
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group =  "users"
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index 26a7592..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  Group(params.user_group)
-  Group(params.smoke_user_group)
-  Group(params.proxyuser_group)
-  User(params.smoke_user,
-       gid=params.user_group,
-       groups=[params.proxyuser_group]
-  )
-  smoke_user_dirs = format(
-    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-  set_uid(params.smoke_user, smoke_user_dirs)
-
-  if params.has_hbase_masters:
-    User(params.hbase_user,
-         gid = params.user_group,
-         groups=[params.user_group])
-    hbase_user_dirs = format(
-      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-    set_uid(params.hbase_user, hbase_user_dirs)
-
-  if params.has_nagios:
-    Group(params.nagios_group)
-    User(params.nagios_user,
-         gid=params.nagios_group)
-
-  if params.has_oozie_server:
-    User(params.oozie_user,
-         gid = params.user_group)
-
-  if params.has_hcat_server_host:
-    User(params.webhcat_user,
-         gid = params.user_group)
-    User(params.hcat_user,
-         gid = params.user_group)
-
-  if params.has_hive_server_host:
-    User(params.hive_user,
-         gid = params.user_group)
-
-  if params.has_resourcemanager:
-    User(params.yarn_user,
-         gid = params.user_group)
-
-  if params.has_ganglia_server:
-    Group(params.gmetad_user)
-    Group(params.gmond_user)
-    User(params.gmond_user,
-         gid=params.user_group,
-        groups=[params.gmond_user])
-    User(params.gmetad_user,
-         gid=params.user_group,
-        groups=[params.gmetad_user])
-
-  User(params.hdfs_user,
-        gid=params.user_group,
-        groups=[params.user_group]
-  )
-  User(params.mapred_user,
-       gid=params.user_group,
-       groups=[params.user_group]
-  )
-  if params.has_zk_host:
-    User(params.zk_user,
-         gid=params.user_group)
-
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  File("/tmp/changeUid.sh",
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
-          not_if = format("test $(id -u {user}) -gt 1000"))
-
-def install_packages():
-  Package("unzip")
-  Package("net-snmp")
-  Package("net-snmp-utils")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index c8939fc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/hook.py
deleted file mode 100644
index e11bfac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-##!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_java()
-    setup_hadoop()
-    setup_configs()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
deleted file mode 100644
index aabb406..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#java params
-artifact_dir = "/tmp/HDP-artifacts/"
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#users and groups
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-yarn_user = config['configurations']['global']['yarn_user']
-
-user_group = config['configurations']['global']['user_group']
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#snmp
-snmp_conf_dir = "/etc/snmp/"
-snmp_source = "0.0.0.0/0"
-snmp_community = "hadoop"
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-#hadoop params
-hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/bin"
-
-task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-limits_conf_dir = "/etc/security/limits.d"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url']
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
-
-rca_enabled = config['configurations']['global']['rca_enabled']
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-if System.get_instance().platform == "suse":
-  jsvc_path = "/usr/lib/bigtop-utils"
-else:
-  jsvc_path = "/usr/libexec/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-#hdfs ha properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namenode_ids:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-  namenode_id = None
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
\ No newline at end of file
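
One detail worth flagging in this deleted params.py: in the HA block, the for loop iterates over the characters of the unsplit dfs_ha_namenode_ids string, and the final assignment of None to namenode_id sits at loop level, so it unconditionally wipes out whatever id was just matched. The intended derivation, sketched with hypothetical configuration values purely for illustration:

# Illustrative sketch of the intended namenode_id derivation (hypothetical values).
dfs_ha_nameservices = "mycluster"
dfs_ha_namenode_ids = "nn1,nn2"        # value of dfs.ha.namenodes.<nameservice>
hostname = "c6401.ambari.apache.org"   # current host
hdfs_site = {
  "dfs.namenode.rpc-address.mycluster.nn1": "c6401.ambari.apache.org:8020",
  "dfs.namenode.rpc-address.mycluster.nn2": "c6402.ambari.apache.org:8020",
}

namenode_id = None
for nn_id in dfs_ha_namenode_ids.split(","):   # split first, then iterate
  nn_host = hdfs_site["dfs.namenode.rpc-address.%s.%s" % (dfs_ha_nameservices, nn_id)]
  if hostname in nn_host:
    namenode_id = nn_id
    break                                      # keep the match instead of resetting it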

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index 7b406e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,322 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_java():
-  """
-  Installs jdk using specific params, that comes from ambari-server
-  """
-  import params
-
-  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
-  java_dir = os.path.dirname(params.java_home)
-  java_exec = format("{java_home}/bin/java")
-  
-  if not params.jdk_name:
-    return
-  
-  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}"))
-
-  if params.jdk_name.endswith(".bin"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
-  elif params.jdk_name.endswith(".gz"):
-    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
-  
-  Execute(install_cmd,
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}")
-  )
-  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
-  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
-  Execute( download_jce,
-        path = ["/bin","/usr/bin/"],
-        not_if =format("test -e {jce_curl_target}"),
-        ignore_failures = True
-  )
-  
-  if params.security_enabled:
-    security_dir = format("{java_home}/jre/lib/security")
-    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
-    Execute(extract_cmd,
-          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
-          cwd  = security_dir,
-          path = ['/bin/','/usr/bin']
-    )
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
-       content=Template("snmpd.conf.j2"))
-  Service("snmpd",
-          action = "restart")
-
-  Execute("/bin/echo 0 > /selinux/enforce",
-          only_if="test -f /selinux/enforce"
-  )
-
-  install_snappy()
-
-  #directories
-  Directory(params.hadoop_conf_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hdfs_log_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hadoop_pid_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-
-  #files
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if tc_mode:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
-    File(os.path.join(params.hadoop_conf_dir, file),
-         owner=tc_owner,
-         content=Template(file + ".j2")
-    )
-
-  health_check_template = "health_check" #for stack 1 use 'health_check'
-  File(os.path.join(params.hadoop_conf_dir, "health_check"),
-       owner=tc_owner,
-       content=Template(health_check_template + ".j2")
-  )
-
-  File(os.path.join(params.hadoop_conf_dir, "log4j.properties"),
-       owner=params.hdfs_user,
-       content=Template("log4j.properties.j2")
-  )
-
-  update_log4j_props(os.path.join(params.hadoop_conf_dir, "log4j.properties"))
-
-  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-       owner=params.hdfs_user,
-       content=Template("hadoop-metrics2.properties.j2")
-  )
-
-  db_driver_dload_cmd = ""
-  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-
-  if db_driver_dload_cmd:
-    Execute(db_driver_dload_cmd,
-            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
-    )
-
-
-def setup_configs():
-  """
-  Creates configs for services DHFS mapred
-  """
-  import params
-
-  if "mapred-queue-acls" in params.config['configurations']:
-    XmlConfig("mapred-queue-acls.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'mapred-queue-acls'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-  elif os.path.exists(
-      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
-    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-
-  File(params.task_log4j_properties_location,
-       content=StaticFile("task-log4j.properties"),
-       mode=0755
-  )
-
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  # if params.stack_version[0] == "1":
-  Link('/usr/lib/hadoop/lib/hadoop-tools.jar',
-       to = '/usr/lib/hadoop/hadoop-tools.jar'
-  )
-
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
-    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-    File(os.path.join(params.hadoop_conf_dir, 'masters'),
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  # generate_include_file()
-
-def update_log4j_props(file):
-  import params
-
-  property_map = {
-    'ambari.jobhistory.database': params.ambari_db_rca_url,
-    'ambari.jobhistory.driver': params.ambari_db_rca_driver,
-    'ambari.jobhistory.user': params.ambari_db_rca_username,
-    'ambari.jobhistory.password': params.ambari_db_rca_password,
-    'ambari.jobhistory.logger': 'DEBUG,JHA',
-
-    'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
-    'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
-    'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
-    'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
-    'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
-
-    'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
-    'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
-  }
-  for key in property_map:
-    value = property_map[key]
-    Execute(format(
-      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
-
-
-def generate_include_file():
-  import params
-
-  if params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-
-def install_snappy():
-  import params
-
-  snappy_so = "libsnappy.so"
-  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
-  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
-  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
-  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
-  so_src_dir_x86 = format("{hadoop_home}/lib")
-  so_src_dir_x64 = format("{hadoop_home}/lib64")
-  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
-  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-  Execute(
-    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-  Execute(
-    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 77e458f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index bb5795b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-env.sh.j2
deleted file mode 100644
index 51e2bac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-env.sh.j2
+++ /dev/null
@@ -1,121 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-# this is different for HDP1 #
-# Path to jsvc required by secure HDP 2.0 datanode
-# export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER={{hdfs_user}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS={{mapreduce_libs_path}}
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
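
For illustration: hadoop-env.sh.j2 above is an ordinary Jinja2 template, so it can be rendered outside Ambari with the jinja2 package. The sketch below renders a few lines in the same style; the variable values are placeholders, not the values Ambari's python libraries would compute.

# Minimal sketch: rendering hadoop-env.sh.j2-style lines with plain jinja2.
# All values passed to render() are illustrative placeholders.
from jinja2 import Template

template_text = (
    "export JAVA_HOME={{java_home}}\n"
    "export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}\n"
    "export HADOOP_HEAPSIZE=\"{{hadoop_heapsize}}\"\n"
)

print(Template(template_text).render(
    java_home="/usr/jdk64/jdk1.6.0_31",   # placeholder
    hadoop_conf_dir="/etc/hadoop/conf",   # placeholder
    hadoop_heapsize="1024",               # placeholder
))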

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index a6a66ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}
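
For illustration: the template above points each daemon's metrics sink at the Ganglia collector on a fixed per-daemon port. The sketch below rebuilds those sink lines from a host name; the port map copies the values in the template, and build_ganglia_sinks is a hypothetical helper, not Ambari code.

# Sketch: generating "<daemon>.sink.ganglia.servers=<host>:<port>" lines.
GANGLIA_PORTS = {
    "namenode": 8661,
    "datanode": 8659,
    "jobtracker": 8662,
    "tasktracker": 8658,
    "maptask": 8660,
    "reducetask": 8660,
}

def build_ganglia_sinks(ganglia_server_host, ports=GANGLIA_PORTS):
    return ["%s.sink.ganglia.servers=%s:%d" % (name, ganglia_server_host, port)
            for name, port in sorted(ports.items())]

if __name__ == "__main__":
    for line in build_ganglia_sinks("ganglia.example.com"):   # placeholder host
        print(line)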

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hdfs.conf.j2
deleted file mode 100644
index ca7baa2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile 32768
-{{hdfs_user}}   - nproc  65536
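
For illustration: hdfs.conf.j2 raises the open-file and process limits for the HDFS user. The standard-library sketch below checks whether the current process already meets those targets; the numbers are copied from the template.

# Sketch: comparing current rlimits against the values hdfs.conf.j2 requests.
import resource

TARGETS = {
    "nofile": (resource.RLIMIT_NOFILE, 32768),
    "nproc": (resource.RLIMIT_NPROC, 65536),
}

for name, (rlimit, wanted) in sorted(TARGETS.items()):
    soft, hard = resource.getrlimit(rlimit)
    ok = soft == resource.RLIM_INFINITY or soft >= wanted
    print("%s: soft=%s hard=%s target=%d %s"
          % (name, soft, hard, wanted, "ok" if ok else "below target"))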

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check-v2.j2
deleted file mode 100644
index cb7b12b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check-v2.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
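
For illustration: check_disks above flags ext3 filesystems that are listed in /etc/fstab but are either not mounted, or mounted read-only, according to /proc/mounts (with /mnt exempt from the unmounted check). A rough Python equivalent of that logic, reading the same files:

# Sketch: Python rendering of the check_disks logic from health_check-v2.j2.
def check_disks(fstab="/etc/fstab", mounts="/proc/mounts"):
    ext3_mounts = []
    with open(fstab) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 3 and parts[2] == "ext3":
                ext3_mounts.append(parts[1])

    mount_opts = {}
    with open(mounts) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 4:
                mount_opts[parts[1]] = parts[3]

    problems = []
    for m in ext3_mounts:
        if m not in mount_opts:
            if m != "/mnt":
                problems.append("%s(u)" % m)       # in fstab but not mounted
        elif mount_opts[m].startswith("ro,"):
            problems.append("%s(ro)" % m)          # mounted read-only
    return problems

if __name__ == "__main__":
    issues = check_disks()
    print("disks ok" if not issues else " ".join(issues))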

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index b84b336..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
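
For illustration: check_jetty above queries the TaskTracker's /jmx servlet and fails once shuffle_exceptions_caught exceeds 10. The sketch below performs the same probe with the standard library; the bean query string is copied from the script, while the host and port (50060 is the usual TaskTracker HTTP port) are placeholders.

# Sketch: the check_jetty probe in Python.
import json
try:
    from urllib.request import urlopen   # Python 3
except ImportError:
    from urllib2 import urlopen          # Python 2

def check_jetty(host="localhost", port=50060, threshold=10):
    url = ("http://%s:%d/jmx?qry="
           "Hadoop:service=TaskTracker,name=ShuffleServerMetrics" % (host, port))
    try:
        payload = json.loads(urlopen(url, timeout=5).read().decode("utf-8"))
    except Exception as exc:
        return 1, "check jetty: ping failed (%s)" % exc
    beans = payload.get("beans") or [{}]
    exceptions = beans[0].get("shuffle_exceptions_caught", 0)
    if exceptions > threshold:
        return 1, "check jetty: shuffle_exceptions=%d" % exceptions
    return 0, "jetty ok"

if __name__ == "__main__":
    print(check_jetty()[1])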

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}


[06/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py
deleted file mode 100644
index e8d4e5c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_service.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hive_service(
-    name,
-    action='start'):
-
-  import params
-
-  if name == 'metastore':
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    cmd = format(
-      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
-  elif name == 'hiveserver2':
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    cmd = format(
-      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
-
-  if action == 'start':
-    demon_cmd = format("{cmd}")
-    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(demon_cmd,
-            user=params.hive_user,
-            not_if=no_op_test
-    )
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format(
-        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
-      Execute(db_connection_check_command,
-              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
-
-  elif action == 'stop':
-    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
-    Execute(demon_cmd)
-
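
For illustration: the start path above guards Execute() with a not_if pid test ("ls {pid_file} && ps `cat {pid_file}`"), so a start request is a no-op when the daemon is already running. The same liveness test as a small standalone function; the pid file path in the example is a placeholder.

# Sketch: the pid-file liveness check hive_service.py passes as not_if.
import os

def daemon_is_running(pid_file):
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except (IOError, OSError, ValueError):
        return False              # missing pid file or unreadable contents
    try:
        os.kill(pid, 0)           # signal 0: check process existence only
        return True
    except OSError:
        return False

if __name__ == "__main__":
    print(daemon_is_running("/var/run/hive/hive.pid"))   # placeholder path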

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py
deleted file mode 100644
index a45d310..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_server.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from mysql_service import mysql_service
-
-class MysqlServer(Script):
-
-  if System.get_instance().platform == "suse":
-    daemon_name = 'mysql'
-  else:
-    daemon_name = 'mysqld'
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action='start')
-
-    File(params.mysql_adduser_path,
-         mode=0755,
-         content=StaticFile('addMysqlUser.sh')
-    )
-
-    # Autoescaping
-    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
-           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd) , params.mysql_host[0])
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-    mysql_service(daemon_name=self.daemon_name, action='stop')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'stop')
-
-  def status(self, env):
-    mysql_service(daemon_name=self.daemon_name, action = 'status')
-
-if __name__ == "__main__":
-  MysqlServer().execute()
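
For illustration: configure() above runs the addMysqlUser.sh helper through Execute with tries=3 and try_sleep=5. The sketch below reproduces that retry pattern around a plain subprocess call; the command shown is a placeholder.

# Sketch: Execute(tries=3, try_sleep=5) behaviour around subprocess.call().
import subprocess
import time

def run_with_retries(cmd, tries=3, try_sleep=5):
    last_code = 1
    for attempt in range(1, tries + 1):
        last_code = subprocess.call(cmd)
        if last_code == 0:
            return 0
        if attempt < tries:
            time.sleep(try_sleep)              # wait before the next attempt
    raise RuntimeError("%r failed after %d tries (exit %d)" % (cmd, tries, last_code))

if __name__ == "__main__":
    run_with_retries(["bash", "-x", "/tmp/addMysqlUser.sh"])   # placeholder command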

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py
deleted file mode 100644
index cfb3e08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/mysql_service.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def mysql_service(daemon_name=None, action='start'):
-  cmd = format('service {daemon_name} {action}')
-
-  if action == 'status':
-    logoutput = False
-  else:
-    logoutput = True
-
-  Execute(cmd,
-          path="/usr/local/bin/:/bin/:/sbin/",
-          tries=1,
-          logoutput=logoutput)
-
-
-
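
For illustration: mysql_service() above is a thin wrapper around "service <daemon> <action>". A subprocess-based sketch of the same call outside the resource_management framework; the default daemon name mirrors the non-SUSE case in mysql_server.py.

# Sketch: a plain-subprocess stand-in for the mysql_service() wrapper.
import subprocess

def mysql_service(daemon_name="mysqld", action="start"):
    cmd = ["service", daemon_name, action]
    rc = subprocess.call(cmd)
    if rc != 0:
        raise RuntimeError("'%s' exited with %d" % (" ".join(cmd), rc))
    return rc

if __name__ == "__main__":
    mysql_service(action="status")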

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py
deleted file mode 100644
index 0cf89be..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/params.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_server_conf_dir = "/etc/hive/conf.server"
-hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
-
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-
-#users
-hive_user = config['configurations']['global']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
-#JDBC driver jar name
-hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
-if hive_jdbc_driver == "com.mysql.jdbc.Driver":
-  jdbc_jar_name = "mysql-connector-java.jar"
-elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-  jdbc_jar_name = "ojdbc6.jar"
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-#common
-hive_metastore_port = config['configurations']['global']['hive_metastore_port']
-hive_var_lib = '/var/lib/hive'
-hive_server_host = config['clusterHostInfo']['hive_server_host']
-hive_url = format("jdbc:hive2://{hive_server_host}:10000")
-
-smokeuser = config['configurations']['global']['smokeuser']
-smoke_test_sql = "/tmp/hiveserver2.sql"
-smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-
-#hive_env
-hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
-hive_pid_dir = status_params.hive_pid_dir
-hive_pid = status_params.hive_pid
-
-#hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
-
-#Starting hiveserver2
-start_hiveserver2_script = 'startHiveserver2.sh'
-
-hadoop_home = '/usr'
-
-##Starting metastore
-start_metastore_script = 'startMetastore.sh'
-hive_metastore_pid = status_params.hive_metastore_pid
-java_share_dir = '/usr/share/java'
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
-
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
-artifact_dir = "/tmp/HDP-artifacts/"
-
-target = format("{hive_lib}/{jdbc_jar_name}")
-
-jdk_location = config['hostLevelParams']['jdk_location']
-driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
-
-start_hiveserver2_path = "/tmp/start_hiveserver2_script"
-start_metastore_path = "/tmp/start_metastore_script"
-
-hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-java64_home = config['hostLevelParams']['java_home']
-
-##### MYSQL
-
-db_name = config['configurations']['global']['hive_database_name']
-mysql_user = "mysql"
-mysql_group = 'mysql'
-mysql_host = config['clusterHostInfo']['hive_mysql_host']
-
-mysql_adduser_path = "/tmp/addMysqlUser.sh"
-
-########## HCAT
-
-hcat_conf_dir = '/etc/hcatalog/conf'
-
-metastore_port = 9933
-hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-
-hcat_dbroot = hcat_lib
-
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
-
-hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
-
-hadoop_conf_dir = '/etc/hadoop/conf'
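
For illustration: params.py maps the configured hive_jdbc_driver class to the connector jar it will fetch. The same mapping as a tiny helper; the default driver value mirrors the script, everything else is illustrative.

# Sketch: the driver-class -> connector-jar mapping from params.py.
def jdbc_jar_for(hive_jdbc_driver="com.mysql.jdbc.Driver"):
    if hive_jdbc_driver == "com.mysql.jdbc.Driver":
        return "mysql-connector-java.jar"
    elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
        return "ojdbc6.jar"
    return None   # params.py leaves jdbc_jar_name unset for other drivers

if __name__ == "__main__":
    print(jdbc_jar_for())
    print(jdbc_jar_for("oracle.jdbc.driver.OracleDriver"))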

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py
deleted file mode 100644
index 111e8a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/service_check.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-from hcat_service_check import hcat_service_check
-
-class HiveServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
-      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
-      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
-    else:
-      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
-
-    File(params.smoke_test_path,
-         content=StaticFile('hiveserver2Smoke.sh'),
-         mode=0755
-    )
-
-    File(params.smoke_test_sql,
-         content=StaticFile('hiveserver2.sql')
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True,
-            user=params.smokeuser)
-
-    hcat_service_check()
-
-if __name__ == "__main__":
-  HiveServiceCheck().execute()
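
For illustration: the service check builds one smoke command for the secure case (a kinit prefix plus a principal suffix on the JDBC URL) and a simpler one otherwise. The sketch below only assembles the string and runs nothing; every path, host, and principal is a placeholder, and it uses a plain Kerberos principal where the script above interpolates hive.metastore.kerberos.keytab.file into the URL.

# Sketch: assembling the HiveServer2 smoke-test command line.
def build_smoke_cmd(security_enabled,
                    hive_url="jdbc:hive2://hive-host.example.com:10000",
                    java64_home="/usr/jdk64/jdk1.6.0_31",
                    smoke_test_path="/tmp/hiveserver2Smoke.sh",
                    smoke_test_sql="/tmp/hiveserver2.sql",
                    kinit_path_local="/usr/bin/kinit",
                    smoke_user_keytab="/etc/security/keytabs/smokeuser.headless.keytab",
                    smokeuser="ambari-qa",
                    hive_principal="hive/_HOST@EXAMPLE.COM"):
    if security_enabled:
        kinit_cmd = "%s -kt %s %s;" % (kinit_path_local, smoke_user_keytab, smokeuser)
        hive_url_ext = "%s/\\;principal=%s" % (hive_url, hive_principal)
        return "%s env JAVA_HOME=%s %s %s %s" % (
            kinit_cmd, java64_home, smoke_test_path, hive_url_ext, smoke_test_sql)
    return "env JAVA_HOME=%s %s %s %s" % (
        java64_home, smoke_test_path, hive_url, smoke_test_sql)

if __name__ == "__main__":
    print(build_smoke_cmd(security_enabled=False))
    print(build_smoke_cmd(security_enabled=True))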

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py
deleted file mode 100644
index 7770975..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/status_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
-hive_pid = 'hive-server.pid'
-
-hive_metastore_pid = 'hive.pid'
-
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2
deleted file mode 100644
index 2a35240..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hcat-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HCAT_PID_DIR={{hcat_pid_dir}}/
-HCAT_LOG_DIR={{hcat_log_dir}}/
-HCAT_CONF_DIR={{hcat_conf_dir}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT={{hcat_dbroot}}
-USER={{hcat_user}}
-METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2
deleted file mode 100644
index 548262a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/templates/hive-env.sh.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the jvm started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-# export HIVE_AUX_JARS_PATH=
-export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml
deleted file mode 100644
index c49480f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/global.xml
+++ /dev/null
@@ -1,35 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hue_pid_dir</name>
-    <value>/var/run/hue</value>
-    <description>Hue Pid Dir.</description>
-  </property>
-  <property>
-    <name>hue_log_dir</name>
-    <value>/var/log/hue</value>
-    <description>Hue Log Dir.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml
deleted file mode 100644
index 6eb52a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/configuration/hue-site.xml
+++ /dev/null
@@ -1,290 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-  <!-- General Hue server configuration properties -->
-  <property>
-      <name>send_debug_messages</name>
-      <value>1</value>
-      <description></description>
-  </property>
-
-  <property>
-    <name>database_logging</name>
-    <value>0</value>
-    <description>To show database transactions, set database_logging to 1.
-      By default, database_logging=0.</description>
-  </property>
-
-  <property>
-    <name>secret_key</name>
-    <value></value>
-    <description>This is used for secure hashing in the session store.</description>
-  </property>
-
-  <property>
-    <name>http_host</name>
-    <value>0.0.0.0</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>http_port</name>
-    <value>8000</value>
-    <description>Webserver listens on this address and port</description>
-  </property>
-
-  <property>
-    <name>time_zone</name>
-    <value>America/Los_Angeles</value>
-    <description>Time zone name</description>
-  </property>
-
-  <property>
-    <name>django_debug_mode</name>
-    <value>1</value>
-    <description>Turn off debug</description>
-  </property>
-
-  <property>
-    <name>use_cherrypy_server</name>
-    <value>false</value>
-    <description>Set to true to use CherryPy as the webserver, set to false
-      to use Spawning as the webserver. Defaults to Spawning if
-      key is not specified.</description>
-  </property>
-
-  <property>
-    <name>http_500_debug_mode</name>
-    <value>1</value>
-    <description>Turn off backtrace for server error</description>
-  </property>
-
-  <property>
-    <name>server_user</name>
-    <value></value>
-    <description>Webserver runs as this user</description>
-  </property>
-
-  <property>
-    <name>server_group</name>
-    <value></value>
-    <description>Webserver runs as this group</description>
-  </property>
-
-  <property>
-    <name>backend_auth_policy</name>
-    <value>desktop.auth.backend.AllowAllBackend</value>
-    <description>Authentication backend.</description>
-  </property>
-
-  <!-- Hue Database configuration properties -->
-  <property>
-    <name>db_engine</name>
-    <value>mysql</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_host</name>
-    <value>localhost</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_port</name>
-    <value>3306</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_user</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_password</name>
-    <value>1111</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <property>
-    <name>db_name</name>
-    <value>sandbox</value>
-    <description>Configuration options for specifying the Desktop Database.</description>
-  </property>
-
-  <!-- Hue Email configuration properties -->
-  <property>
-    <name>smtp_host</name>
-    <value>localhost</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_port</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_user</name>
-    <value></value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>smtp_password</name>
-    <value>25</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <property>
-    <name>tls</name>
-    <value>no</value>
-    <description>Whether to use a TLS (secure) connection when talking to the SMTP server.</description>
-  </property>
-
-  <property>
-    <name>default_from_email</name>
-    <value>sandbox@hortonworks.com</value>
-    <description>The SMTP server information for email notification delivery.</description>
-  </property>
-
-  <!-- Hue Hadoop configuration properties -->
-  <property>
-    <name>fs_defaultfs</name>
-    <value></value>
-    <description>Enter the filesystem URI, e.g. hdfs://sandbox:8020</description>
-  </property>
-
-  <property>
-    <name>webhdfs_url</name>
-    <value></value>
-    <description>Use WebHdfs/HttpFs as the communication mechanism. To fall back to
-      using the Thrift plugin (used in Hue 1.x), this must be uncommented
-      and explicitly set to the empty value.
-      Value e.g.: http://localhost:50070/webhdfs/v1/</description>
-  </property>
-
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the Hadoop JobTracker.</description>
-  </property>
-
-  <property>
-    <name>jobtracker_port</name>
-    <value>50030</value>
-    <description>The port where the JobTracker IPC listens on.</description>
-  </property>
-
-  <property>
-    <name>hadoop_mapred_home</name>
-    <value>/usr/lib/hadoop/lib</value>
-    <description>The Hadoop MapReduce home directory (location of the MapReduce libraries).</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_host</name>
-    <value></value>
-    <description>Enter the host on which you are running the ResourceManager.</description>
-  </property>
-
-  <property>
-    <name>resourcemanager_port</name>
-    <value></value>
-    <description>The port where the ResourceManager IPC listens on.</description>
-  </property>
-
-  <!-- Hue Beeswax configuration properties -->
-  <property>
-    <name>hive_home_dir</name>
-    <value></value>
-    <description>Hive home directory.</description>
-  </property>
-
-  <property>
-    <name>hive_conf_dir</name>
-    <value></value>
-    <description>Hive configuration directory, where hive-site.xml is
-      located.</description>
-  </property>
-
-  <property>
-    <name>templeton_url</name>
-    <value></value>
-    <description>WebHcat http URL</description>
-  </property>
-
-  <!-- Hue shell types configuration -->
-  <property>
-    <name>pig_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type pig</description>
-  </property>
-
-  <property>
-    <name>pig_shell_command</name>
-    <value>/usr/bin/pig -l /dev/null</value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>pig_java_home</name>
-    <value></value>
-    <description>Define and configure a new shell type pig.</description>
-  </property>
-
-  <property>
-    <name>hbase_nice_name</name>
-    <value>HBase Shell</value>
-    <description>Define and configure a new shell type hbase</description>
-  </property>
-
-  <property>
-    <name>hbase_shell_command</name>
-    <value>/usr/bin/hbase shell</value>
-    <description>Define and configure a new shell type hbase.</description>
-  </property>
-
-  <property>
-    <name>bash_nice_name</name>
-    <value></value>
-    <description>Define and configure a new shell type bash for testing
-      only</description>
-  </property>
-
-  <property>
-    <name>bash_shell_command</name>
-    <value>/bin/bash</value>
-    <description>Define and configure a new shell type bash for testing only.</description>
-  </property>
-
-  <!-- Hue Settings for the User Admin application -->
-  <property>
-    <name>whitelist</name>
-    <value>(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)</value>
-    <description>proxy settings</description>
-  </property>
-
-</configuration>
\ No newline at end of file
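
For illustration: the whitelist property above is a regular expression restricting which host:port targets Hue's proxy may contact. The sketch below exercises that pattern; the regex is copied from the property value and the candidate addresses are made up.

# Sketch: checking candidate proxy targets against the Hue whitelist regex.
import re

WHITELIST = re.compile(r"(localhost|127\.0\.0\.1):(50030|50070|50060|50075|50111)")

for candidate in ("localhost:50070", "127.0.0.1:50030", "namenode.example.com:50070"):
    verdict = "allowed" if WHITELIST.match(candidate) else "rejected"
    print("%-30s %s" % (candidate, verdict))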

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml
deleted file mode 100644
index 0a6b59e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HUE/metainfo.xml
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Hue is a graphical user interface to operate and develop
-      applications for Apache Hadoop.</comment>
-    <version>2.2.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>HUE_SERVER</name>
-            <category>MASTER</category>
-            <cardinality>1</cardinality>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml
deleted file mode 100644
index 8034d19..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,195 +0,0 @@
-<?xml version="1.0"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- This is the configuration file for the resource manager in Hadoop. -->
-<!-- You can configure various scheduling parameters related to queues. -->
-<!-- The properties for a queue follow a naming convention,such as, -->
-<!-- mapred.capacity-scheduler.queue.<queue-name>.property-name. -->
-
-<configuration>
-
-  <property>
-    <name>mapred.capacity-scheduler.maximum-system-jobs</name>
-    <value>3000</value>
-    <description>Maximum number of jobs in the system which can be initialized,
-     concurrently, by the CapacityScheduler.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.capacity</name>
-    <value>100</value>
-    <description>Percentage of the number of slots in the cluster that are
-      to be available for jobs in this queue.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-capacity</name>
-    <value>-1</value>
-    <description>
-	maximum-capacity defines a limit beyond which a queue cannot use the capacity of the cluster.
-	This provides a means to limit how much excess capacity a queue can use. By default, there is no limit.
-	The maximum-capacity of a queue can only be greater than or equal to its minimum capacity.
-        Default value of -1 implies a queue can use complete capacity of the cluster.
-
-        This property can be used to curtail long-running jobs from occupying more than a
-        certain percentage of the cluster, which, in the absence of pre-emption, could affect
-        the capacity guarantees of other queues.
-        
-        Note that maximum-capacity is a percentage, so the absolute maximum capacity changes
-        with the cluster's capacity: if a large number of nodes or racks is added to the
-        cluster, the maximum capacity in absolute terms increases accordingly.
-    </description>    
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.minimum-user-limit-percent</name>
-    <value>100</value>
-    <description> Each queue enforces a limit on the percentage of resources 
-    allocated to a user at any given time, if there is competition for them. 
-    This user limit can vary between a minimum and maximum value. The former
-    depends on the number of users who have submitted jobs, and the latter is
-    set to this property value. For example, suppose the value of this 
-    property is 25. If two users have submitted jobs to a queue, no single 
-    user can use more than 50% of the queue resources. If a third user submits
-    a job, no single user can use more than 33% of the queue resources. With 4 
-    or more users, no user can use more than 25% of the queue's resources. A 
-    value of 100 implies no user limits are imposed. 
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.user-limit-factor</name>
-    <value>1</value>
-    <description>The multiple of the queue capacity which can be configured to 
-    allow a single user to acquire more slots. 
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks</name>
-    <value>200000</value>
-    <description>The maximum number of tasks, across all jobs in the queue, 
-    which can be initialized concurrently. Once the queue's jobs exceed this 
-    limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.maximum-initialized-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The maximum number of tasks per user, across all of the
-    user's jobs in the queue, which can be initialized concurrently. Once the
-    user's jobs exceed this limit they will be queued on disk.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.queue.default.init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The multiple of (maximum-system-jobs * queue-capacity) used to
-    determine the number of jobs which are accepted by the scheduler.
-    </description>
-  </property>
-
-  <!-- The default configuration settings for the capacity task scheduler -->
-  <!-- The default values would be applied to all the queues which don't have -->
-  <!-- the appropriate property for the particular queue -->
-  <property>
-    <name>mapred.capacity-scheduler.default-supports-priority</name>
-    <value>false</value>
-    <description>If true, priorities of jobs will be taken into 
-      account in scheduling decisions by default in a job queue.
-    </description>
-  </property>
-  
-  <property>
-    <name>mapred.capacity-scheduler.default-minimum-user-limit-percent</name>
-    <value>100</value>
-    <description>The default limit, as a percentage, on the resources that a
-      single user may occupy in a job queue at any given point of time.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.capacity-scheduler.default-user-limit-factor</name>
-    <value>1</value>
-    <description>The default multiple of queue-capacity which is used to
-    determine the number of slots a single user can consume concurrently.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-queue</name>
-    <value>200000</value>
-    <description>The default maximum number of tasks, across all jobs in the 
-    queue, which can be initialized concurrently. Once the queue's jobs exceed 
-    this limit they will be queued on disk.  
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-maximum-active-tasks-per-user</name>
-    <value>100000</value>
-    <description>The default maximum number of tasks per user, across all of
-    the user's jobs in the queue, which can be initialized concurrently. Once
-    the user's jobs exceed this limit they will be queued on disk.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.capacity-scheduler.default-init-accept-jobs-factor</name>
-    <value>10</value>
-    <description>The default multiple of (maximum-system-jobs * queue-capacity)
-    used to determine the number of jobs which are accepted by the scheduler.  
-    </description>
-  </property>
-
-  <!-- Capacity scheduler Job Initialization configuration parameters -->
-  <property>
-    <name>mapred.capacity-scheduler.init-poll-interval</name>
-    <value>5000</value>
-    <description>The amount of time in milliseconds which is used to poll
-    the job queues for jobs to initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapred.capacity-scheduler.init-worker-threads</name>
-    <value>5</value>
-    <description>Number of worker threads used by the initialization poller to
-    initialize jobs in a set of queues. If this number equals the number of job
-    queues, each thread initializes jobs in exactly one queue. If it is smaller,
-    each thread is assigned a set of queues. If it is larger, the number of
-    threads is capped at the number of job queues.
-    </description>
-  </property>
-
-</configuration>
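
As a minimal sketch of the arithmetic the capacity-scheduler descriptions above spell out
(per-user share from minimum-user-limit-percent and user-limit-factor, accepted jobs from
init-accept-jobs-factor), assuming hypothetical helper names and sample numbers that are
not part of the stack scripts:

def per_user_slot_share(queue_slots, active_users, min_user_limit_percent, user_limit_factor):
    """Per-user slot cap: at least the fair share (queue/users), never below
    min_user_limit_percent of the queue, never above user_limit_factor * queue."""
    if active_users == 0:
        return 0
    fair_share = queue_slots / float(active_users)
    floor_share = queue_slots * min_user_limit_percent / 100.0
    ceiling = queue_slots * user_limit_factor
    return min(max(fair_share, floor_share), ceiling)

def accepted_jobs(maximum_system_jobs, queue_capacity_percent, init_accept_jobs_factor):
    """Jobs accepted for a queue: factor * (maximum-system-jobs * queue-capacity)."""
    return int(init_accept_jobs_factor * maximum_system_jobs * queue_capacity_percent / 100.0)

# With the defaults above (minimum-user-limit-percent=100, user-limit-factor=1) a single
# user may occupy the whole queue; with a limit of 25 and two active users each gets 50%.
print(per_user_slot_share(100, 2, 25, 1))   # 50.0
print(accepted_jobs(3000, 100, 10))         # 30000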

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml
deleted file mode 100644
index 4633855..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>jobtracker_host</name>
-    <value></value>
-    <description>JobTracker Host.</description>
-  </property>
-  <property>
-    <name>tasktracker_hosts</name>
-    <value></value>
-    <description>TaskTracker hosts.</description>
-  </property>
-  <property>
-    <name>mapred_local_dir</name>
-    <value>/hadoop/mapred</value>
-    <description>MapRed Local Directories.</description>
-  </property>
-  <property>
-    <name>mapred_system_dir</name>
-    <value>/mapred/system</value>
-    <description>MapRed System Directories.</description>
-  </property>
-  <property>
-    <name>scheduler_name</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-    <description>MapRed Capacity Scheduler.</description>
-  </property>
-  <property>
-    <name>jtnode_opt_newsize</name>
-    <value>200</value>
-    <description>JobTracker new generation heap size in MB (Java option -XX:NewSize).</description>
-  </property>
-  <property>
-    <name>jtnode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>JobTracker maximum new generation heap size in MB (Java option -XX:MaxNewSize).</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>jtnode_heapsize</name>
-    <value>1024</value>
-    <description>Maximum Java heap size for JobTracker in MB (Java option -Xmx)</description>
-  </property>
-  <property>
-    <name>mapred_map_tasks_max</name>
-    <value>4</value>
-    <description>Number of slots that Map tasks that run simultaneously can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_red_tasks_max</name>
-    <value>2</value>
-    <description>Number of slots that Reduce tasks that run simultaneously can occupy on a TaskTracker</description>
-  </property>
-  <property>
-    <name>mapred_cluster_map_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Map slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_cluster_red_mem_mb</name>
-    <value>-1</value>
-    <description>The virtual memory size of a single Reduce slot in the MapReduce framework</description>
-  </property>
-  <property>
-    <name>mapred_job_map_mem_mb</name>
-    <value>-1</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-  <property>
-    <name>mapred_child_java_opts_sz</name>
-    <value>768</value>
-    <description>Java options for the TaskTracker child processes.</description>
-  </property>
-  <property>
-    <name>io_sort_mb</name>
-    <value>200</value>
-    <description>The total amount of Map-side buffer memory to use while sorting files (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>io_sort_spill_percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection (Expert-only configuration).</description>
-  </property>
-  <property>
-    <name>mapreduce_userlog_retainhours</name>
-    <value>24</value>
-    <description>The maximum time, in hours, for which the user-logs are to be retained after the job completion.</description>
-  </property>
-  <property>
-    <name>maxtasks_per_job</name>
-    <value>-1</value>
-    <description>Maximum number of tasks for a single Job</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  <property>
-    <name>snappy_enabled</name>
-    <value>true</value>
-    <description>Snappy compression enabled</description>
-  </property>
-  <property>
-    <name>rca_enabled</name>
-    <value>true</value>
-    <description>Enable Job Diagnostics.</description>
-  </property>
-  <property>
-    <name>mapred_hosts_exclude</name>
-    <value></value>
-    <description>Exclude entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_hosts_include</name>
-    <value></value>
-    <description>Include entered hosts</description>
-  </property>
-  <property>
-    <name>mapred_jobstatus_dir</name>
-    <value>/mapred/jobstatus</value>
-    <description>Job Status directory</description>
-  </property>
-  <property>
-    <name>task_controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>Task Controller.</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>MapReduce User.</description>
-  </property>
-
-</configuration>
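
As a minimal back-of-envelope sketch of how the slot and heap settings above combine
(mapred_map_tasks_max, mapred_red_tasks_max, mapred_child_java_opts_sz, hadoop_heapsize),
assuming a hypothetical helper that is not part of the stack scripts:

def tasktracker_heap_footprint_mb(map_slots, reduce_slots, child_heap_mb, daemon_heap_mb):
    """Worst-case Java heap demand on one TaskTracker node: every map and reduce
    slot running a child JVM at its -Xmx, plus the TaskTracker daemon itself."""
    return (map_slots + reduce_slots) * child_heap_mb + daemon_heap_mb

# With the defaults above (4 map slots, 2 reduce slots, 768 MB child heap, 1024 MB daemon heap):
print(tasktracker_heap_footprint_mb(4, 2, 768, 1024))  # 5632 MB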

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml
deleted file mode 100644
index 1db37a8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/configuration/mapred-site.xml
+++ /dev/null
@@ -1,601 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- i/o properties -->
-
-  <property>
-    <name>io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of Map-side buffer memory to use while sorting files
-    </description>
-  </property>
-
-  <property>
-    <name>io.sort.record.percent</name>
-    <value>.2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>io.sort.spill.percent</name>
-    <value>0.9</value>
-    <description>Percentage of sort buffer used for record collection</description>
-  </property>
-
-  <property>
-    <name>io.sort.factor</name>
-    <value>100</value>
-    <description>No description</description>
-  </property>
-
-  <!-- map/reduce properties -->
-
-  <property>
-    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-    <value>250</value>
-    <description>Normally, this is the amount of time before killing
-      processes, and the recommended default is 5 seconds (a value of 5000
-      milliseconds for this property).  In this case, we are using it solely to
-      blast tasks before killing them, and killing them very quickly (1/4 second)
-      to guarantee that we do not leave VMs around for later jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.handler.count</name>
-    <value>50</value>
-    <description>
-      The number of server threads for the JobTracker. This should be roughly
-      4% of the number of tasktracker nodes.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.system.dir</name>
-    <value>/mapred/system</value>
-    <description>Path on the HDFS where the MapReduce framework stores system files</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker</name>
-    <!-- cluster variant -->
-    <value>localhost:50300</value>
-    <description>JobTracker address</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:50030</value>
-    <description>JobTracker host and http port address</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <!-- cluster specific -->
-    <name>mapred.local.dir</name>
-    <value>/hadoop/mapred</value>
-    <description>No description</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-  </property>
-
-  <property>
-    <name>mapred.reduce.parallel.copies</name>
-    <value>30</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.map.tasks.maximum</name>
-    <value>4</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.tasktracker.reduce.tasks.maximum</name>
-    <value>2</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>tasktracker.http.threads</name>
-    <value>50</value>
-  </property>
-
-  <property>
-    <name>mapred.map.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some map tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>If true, then multiple instances of some reduce tasks
-      may be executed in parallel.</description>
-  </property>
-
-  <property>
-    <name>mapred.reduce.slowstart.completed.maps</name>
-    <value>0.05</value>
-  </property>
-
-  <property>
-    <name>mapred.inmem.merge.threshold</name>
-    <value>1000</value>
-    <description>The threshold, in terms of the number of files,
-      for the in-memory merge process. When we accumulate the threshold number of
-      files we initiate the in-memory merge and spill to disk. A value of 0 or less
-      indicates that no threshold is applied and only the ramfs's memory consumption
-      triggers the merge.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapred.job.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.map.output.compression.codec</name>
-    <value>org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.output.compression.type</name>
-    <value>BLOCK</value>
-    <description>If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapred.jobtracker.completeuserjobs.maximum</name>
-    <value>0</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.taskScheduler</name>
-    <value>org.apache.hadoop.mapred.CapacityTaskScheduler</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.restart.recover</name>
-    <value>false</value>
-    <description>"true" to enable (job) recovery upon restart,
-      "false" to start afresh
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>The percentage of memory - relative to the maximum heap size - to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.limit</name>
-    <value>10737418240</value>
-    <description>The limit on the input size of the reduce. (This value
-      is 10 GB.)  If the estimated input size of the reduce is greater than
-      this value, the job is failed. A value of -1 means that there is no
-      limit set. </description>
-  </property>
-
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapred.compress.map.output</name>
-    <value></value>
-  </property>
-
-
-  <property>
-    <name>mapred.task.timeout</name>
-    <value>600000</value>
-    <description>The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>jetty.connector</name>
-    <value>org.mortbay.jetty.nio.SelectChannelConnector</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.task-controller</name>
-    <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-    <description>
-      TaskController which is used to launch and manage task execution.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.child.root.logger</name>
-    <value>INFO,TLA</value>
-  </property>
-
-  <property>
-    <name>ambari.mapred.child.java.opts.memory</name>
-    <value>768</value>
-
-    <description>Java options Memory for the TaskTracker child processes</description>
-  </property>
-
-  <property>
-    <name>mapred.child.java.opts</name>
-    <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
-    <description>Java options for the TaskTracker child processes</description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      The virtual memory size of a single Map slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      The virtual memory size of a single Reduce slot in the MapReduce framework
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.map.memory.mb</name>
-    <value>1536</value>
-    <description>
-      Virtual memory for single Map task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reduce.memory.mb</name>
-    <value>2048</value>
-    <description>
-      Virtual memory for single Reduce task
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.map.memory.mb</name>
-    <value>6144</value>
-    <description>
-      Upper limit on virtual memory size for a single Map task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.cluster.max.reduce.memory.mb</name>
-    <value>4096</value>
-    <description>
-      Upper limit on virtual memory size for a single Reduce task of any MapReduce job
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts</name>
-    <value>/etc/hadoop/conf/mapred.include</value>
-    <description>
-      Names a file that contains the list of nodes that may
-      connect to the jobtracker.  If the value is empty, all hosts are
-      permitted.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.hosts.exclude</name>
-    <value>/etc/hadoop/conf/mapred.exclude</value>
-    <description>
-      Names a file that contains the list of hosts that
-      should be excluded by the jobtracker.  If the value is empty, no
-      hosts are excluded.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.max.tracker.blacklists</name>
-    <value>16</value>
-    <description>
-      If a node is reported as blacklisted by 16 successful jobs within the timeout window, it will be graylisted.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.path</name>
-    <value>/etc/hadoop/conf/health_check</value>
-    <description>
-      Absolute path to the script used to check node health.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.interval</name>
-    <value>135000</value>
-  </property>
-
-  <property>
-    <name>mapred.healthChecker.script.timeout</name>
-    <value>60000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.active</name>
-    <value>false</value>
-    <description>Indicates if persistence of job status information is
-      active or not.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.hours</name>
-    <value>1</value>
-    <description>The number of hours job status information is persisted in DFS.
-      The job status information will be available after it drops off the memory
-      queue and between jobtracker restarts. With a zero value the job status
-      information is not persisted at all in DFS.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.persist.jobstatus.dir</name>
-    <value>/mapred/jobstatus</value>
-    <description>The directory where the job status information is persisted
-      in a file system to be available after it drops off the memory queue and
-      between jobtracker restarts.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.check</name>
-    <value>10000</value>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.retirejob.interval</name>
-    <value>21600000</value>
-  </property>
-
-  <property>
-    <name>mapred.job.tracker.history.completed.location</name>
-    <value>/mapred/history/done</value>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.task.maxvmem</name>
-    <value></value>
-    <final>true</final>
-    <description>No description</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.maxtasks.per.job</name>
-    <value>-1</value>
-    <final>true</final>
-    <description>The maximum number of tasks for a single job.
-      A value of -1 indicates that there is no maximum.  </description>
-  </property>
-
-  <property>
-    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>mapred.userlog.retain.hours</name>
-    <value>24</value>
-    <description>
-      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.job.reuse.jvm.num.tasks</name>
-    <value>1</value>
-    <description>
-      How many tasks to run per jvm. If set to -1, there is no limit
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      JT user name key.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.kerberos.principal</name>
-    <value></value>
-    <description>
-      TaskTracker user name key. "_HOST" is replaced by the host name of the task tracker.
-    </description>
-  </property>
-
-
-  <property>
-    <name>hadoop.job.history.user.location</name>
-    <value>none</value>
-    <final>true</final>
-  </property>
-
-
-  <property>
-    <name>mapreduce.jobtracker.keytab.file</name>
-    <value></value>
-    <description>
-      The keytab for the jobtracker principal.
-    </description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.keytab.file</name>
-    <value></value>
-    <description>The filename of the keytab for the task tracker</description>
-  </property>
-
-  <property>
-    <name>mapred.task.tracker.http.address</name>
-    <value></value>
-    <description>Http address for task tracker.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.staging.root.dir</name>
-    <value>/user</value>
-    <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-      name. It is a path in the default file system.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.tasktracker.group</name>
-    <value>hadoop</value>
-    <description>The group to which the TaskTracker and the task-controller binary belong. The mapred user must be a member and regular users should *not* be members.</description>
-
-  </property>
-
-  <property>
-    <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
-    <value>50000000</value>
-    <final>true</final>
-    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-      initialize.
-    </description>
-  </property>
-  <property>
-    <name>mapreduce.history.server.embedded</name>
-    <value>false</value>
-    <description>Should job history server be embedded within Job tracker
-      process</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.history.server.http.address</name>
-    <!-- cluster variant -->
-    <value>localhost:51111</value>
-    <description>Http address of the history server</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.kerberos.principal</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>Job history user name key. (must map to same user as JT
-      user)</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-    <value>180</value>
-    <description>
-      3-hour sliding window (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-    <value>15</value>
-    <description>
-      15-minute bucket size (value is in minutes)
-    </description>
-  </property>
-
-  <property>
-    <name>mapred.queue.names</name>
-    <value>default</value>
-    <description> Comma separated list of queues configured for this jobtracker.</description>
-  </property>
-  
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>JobHistoryServer web UI address (host:port).</description>
-  </property>
-
-</configuration>
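
As a minimal sketch of the ${...} substitution that mapred-site.xml above relies on
(mapred.child.java.opts referencing ambari.mapred.child.java.opts.memory), assuming a
hypothetical helper that is not Ambari or Hadoop API:

import re

def expand_variables(value, props):
    """Expand ${name} references in a property value, the way Hadoop's configuration
    layer resolves mapred.child.java.opts before launching task JVMs."""
    def repl(match):
        return str(props.get(match.group(1), match.group(0)))
    return re.sub(r"\$\{([^}]+)\}", repl, value)

props = {
    "ambari.mapred.child.java.opts.memory": 768,
    "mapred.child.java.opts":
        "-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true",
}

print(expand_variables(props["mapred.child.java.opts"], props))
# -server -Xmx768m -Djava.net.preferIPv4Stack=true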

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
deleted file mode 100644
index 71783d7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/metainfo.xml
+++ /dev/null
@@ -1,102 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>MAPREDUCE</name>
-      <comment>Apache Hadoop Distributed Processing Framework</comment>
-      <version>1.2.0.1.3.3.0</version>
-      <components>
-        <component>
-          <name>JOBTRACKER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/jobtracker.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/jobtracker.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>TASKTRACKER</name>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/tasktracker.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MAPREDUCE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality> 
-          <commandScript>
-            <script>scripts/client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-              
-        <component>
-          <name>HISTORYSERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>MAPREDUCE/JOBTRACKER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>capacity-scheduler</config-type>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-queue-acls</config-type>
-      </configuration-dependencies>
-    </service>
-
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/client.py
deleted file mode 100644
index 79c644d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Client(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  Client().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/historyserver.py
deleted file mode 100644
index 8eb2089..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/historyserver.py
+++ /dev/null
@@ -1,59 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Historyserver(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    service('historyserver',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('historyserver',
-            action='stop'
-    )
-
-  def status(self, env):
-     import status_params
-     env.set_params(status_params)
-     check_process_status(status_params.historyserver_pid_file)
-
-if __name__ == "__main__":
-  Historyserver().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
deleted file mode 100644
index 8f7f1d7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/jobtracker.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Jobtracker(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('jobtracker',
-            action='start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('jobtracker',
-            action='stop'
-    )
-    
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.jobtracker_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-
-    mapred_user = params.mapred_user
-    conf_dir = params.conf_dir
-    user_group = params.user_group
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=mapred_user,
-         group=user_group
-    )
-
-    ExecuteHadoop('mradmin -refreshNodes',
-                user=mapred_user,
-                conf_dir=conf_dir,
-                kinit_override=True)
-    pass
-
-if __name__ == "__main__":
-  Jobtracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/mapreduce.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/mapreduce.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/mapreduce.py
deleted file mode 100644
index c5fd002..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/mapreduce.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-
-def mapreduce():
-  import params
-
-  Directory([params.mapred_pid_dir,params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  Directory(params.mapred_local_dir,
-            owner=params.mapred_user,
-            mode=0755,
-            recursive=True
-  )
-
-  File(params.exclude_file_path,
-            owner=params.mapred_user,
-            group=params.user_group,
-  )
-
-  File(params.mapred_hosts_file_path,
-            owner=params.mapred_user,
-            group=params.user_group,
-  )
\ No newline at end of file


[07/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index d8e191f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,192 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-import urlparse
-
-
-def namenode(action=None, format=True):
-  import params
-
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if format:
-      format_namenode()
-      pass
-    service(
-      action="start", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      create_pid_dir=True,
-      create_log_dir=True,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-    # TODO: extract creating of dirs to different services
-    create_app_directories()
-    create_user_directories()
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-
-def create_app_directories():
-  import params
-
-  hdfs_directory(name="/tmp",
-                 owner=params.hdfs_user,
-                 mode="777"
-  )
-  #mapred directories
-  if params.has_jobtracker:
-    hdfs_directory(name="/mapred",
-                   owner=params.mapred_user
-    )
-    hdfs_directory(name="/mapred/system",
-                   owner=params.mapred_user
-    )
-    #hbase directories
-  if len(params.hbase_master_hosts) != 0:
-    hdfs_directory(name=params.hbase_hdfs_root_dir,
-                   owner=params.hbase_user
-    )
-    hdfs_directory(name=params.hbase_staging_dir,
-                   owner=params.hbase_user,
-                   mode="711"
-    )
-    #hive directories
-  if len(params.hive_server_host) != 0:
-    hdfs_directory(name=params.hive_apps_whs_dir,
-                   owner=params.hive_user,
-                   mode="777"
-    )
-  if len(params.hcat_server_hosts) != 0:
-    hdfs_directory(name=params.webhcat_apps_dir,
-                   owner=params.webhcat_user,
-                   mode="755"
-    )
-  if len(params.hs_host) != 0:
-    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-  pass
-
-
-def create_user_directories():
-  import params
-
-  hdfs_directory(name=params.smoke_hdfs_user_dir,
-                 owner=params.smoke_user,
-                 mode=params.smoke_hdfs_user_mode
-  )
-
-  if params.has_hive_server_host:
-    hdfs_directory(name=params.hive_hdfs_user_dir,
-                   owner=params.hive_user,
-                   mode=params.hive_hdfs_user_mode
-    )
-
-  if params.has_hcat_server_host:
-    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-      hdfs_directory(name=params.hcat_hdfs_user_dir,
-                     owner=params.hcat_user,
-                     mode=params.hcat_hdfs_user_mode
-      )
-    hdfs_directory(name=params.webhcat_hdfs_user_dir,
-                   owner=params.webhcat_user,
-                   mode=params.webhcat_hdfs_user_mode
-    )
-
-  if params.has_oozie_server:
-    hdfs_directory(name=params.oozie_hdfs_user_dir,
-                   owner=params.oozie_user,
-                   mode=params.oozie_hdfs_user_mode
-    )
-
-
-def format_namenode(force=None):
-  import params
-
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if True:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True)
-    else:
-      File('/tmp/checkForFormat.sh',
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
-        "{dfs_name_dir}"),
-              not_if=format("test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
-    Execute(format("mkdir -p {mark_dir}"))
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=params.user_group
-  )
-
-  ExecuteHadoop('dfsadmin -refreshNodes',
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index a943455..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-  elif action == "start":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )
-  elif action == "stop":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index 80700c8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_namenode import namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    namenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="configure")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-
-if __name__ == "__main__":
-  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 3e0e65b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,165 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-config = Script.get_config()
-
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_histroryserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = status_params.hdfs_user
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group = "users"
-
-#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/bin"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-
-dfs_domain_socket_path = "/var/lib/hadoop-hdfs/dn_socket"
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
-
-# if stack_version[0] == "2":
-#dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-# else:
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.name.dir']#","/tmp/hadoop-hdfs/dfs/name")
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
-hbase_staging_dir = "/apps/hbase/staging"
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
-webhcat_apps_dir = "/apps/webhcat"
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
-
-if has_oozie_server:
-  oozie_hdfs_user_dir = format("/user/{oozie_user}")
-  oozie_hdfs_user_mode = 775
-if has_hcat_server_host:
-  hcat_hdfs_user_dir = format("/user/{hcat_user}")
-  hcat_hdfs_user_mode = 755
-  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-  webhcat_hdfs_user_mode = 755
-if has_hive_server_host:
-  hive_hdfs_user_dir = format("/user/{hive_user}")
-  hive_hdfs_user_mode = 700
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 770
-
-namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-
-# if stack_version[0] == "2":
-#fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
-# else:
-fs_checkpoint_dir = config['configurations']['core-site']['fs.checkpoint.dir']#","/tmp/hadoop-hdfs/dfs/namesecondary")
-
-# if stack_version[0] == "2":
-#dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
-# else:
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
-
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index 5cd264b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    #cleanup put below to handle retries; if retrying there will be a stale file
-    #that needs cleanup; exit code is a function of the second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=15,
-                  tries=20
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName))
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
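-    # when ZKFC hosts are configured, check that the ZKFC pid file exists and the recorded process is alive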
-    if params.has_zkfc_hosts:
-      pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-      pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-      check_zkfc_process_cmd = format(
-        "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-      Execute(check_zkfc_process_cmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 8f682ec..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env)
-
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.config(env)
-    snamenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 4097373..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index e28d0e9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,133 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def service(action=None, name=None, user=None, create_pid_dir=False,
-            create_log_dir=False, keytab=None, principal=None):
-  import params
-
-  kinit_cmd = "true"
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  if params.security_enabled:
-    principal_replaced = principal.replace("_HOST", params.hostname)
-    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
-
-    if name == "datanode":
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-  daemon_cmd = format("{cmd} {action} {name}")
-
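-  # on "start", skip the daemon command if the pid file already points to a live process; "stop" is never guarded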
-  service_is_up = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
-
-  Execute(kinit_cmd)
-  Execute(daemon_cmd,
-          user = user,
-          not_if=service_is_up
-  )
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-         ignore_failures=True
-    )
-
-
-def hdfs_directory(name=None, owner=None, group=None,
-                   mode=None, recursive_chown=False, recursive_chmod=False):
-  import params
-
-  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
-  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
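-  # directories that were already created are recorded in a stub file so repeated runs can skip the mkdir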
-  stub_dir = params.namenode_dirs_created_stub_dir
-  stub_filename = params.namenode_dirs_stub_filename
-  dir_absent_in_stub = format(
-    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
-  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
-  tries = 3
-  try_sleep = 10
-  dfs_check_nn_status_cmd = "true"
-
-  #if params.stack_version[0] == "2":
-  #mkdir_cmd = format("fs -mkdir -p {name}")
-  #else:
-  mkdir_cmd = format("fs -mkdir {name}")
-
-  if params.security_enabled:
-    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
-            user = params.hdfs_user)
-  ExecuteHadoop(mkdir_cmd,
-                try_sleep=try_sleep,
-                tries=tries,
-                not_if=format(
-                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "{dir_exists} && ! {namenode_safe_mode_off}"),
-                only_if=format(
-                  "su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "! {dir_exists}'"),
-                conf_dir=params.hadoop_conf_dir,
-                user=params.hdfs_user
-  )
-  Execute(record_dir_in_stub,
-          user=params.hdfs_user,
-          only_if=format("! {dir_absent_in_stub}")
-  )
-
-  recursive = "-R" if recursive_chown else ""
-  perm_cmds = []
-
-  if owner:
-    chown = owner
-    if group:
-      chown = format("{owner}:{group}")
-    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
-  if mode:
-    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
-  for cmd in perm_cmds:
-    ExecuteHadoop(cmd,
-                  user=params.hdfs_user,
-                  only_if=format("su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}'"),
-                  try_sleep=try_sleep,
-                  tries=tries,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index c3af46e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/global.xml
deleted file mode 100644
index ae7f586..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/global.xml
+++ /dev/null
@@ -1,148 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hivemetastore_host</name>
-    <value></value>
-    <description>Hive Metastore host.</description>
-  </property>
-  <property>
-    <name>hive_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_mysql_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_database</name>
-    <value></value>
-    <description>Hive database name.</description>
-  </property>
-  <property>
-    <name>hive_existing_oracle_host</name>
-    <value></value>
-    <description></description>
-  </property>
-  <property>
-    <name>hive_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database type.</description>
-  </property>  
-  <property>
-    <name>hive_ambari_host</name>
-    <value></value>
-    <description>Database hostname.</description>
-  </property>
-  <property>
-    <name>hive_database_name</name>
-    <value></value>
-    <description>Database name.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_name</name>
-    <value>hive</value>
-    <description>Database username to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database.</description>
-  </property>    
-  <property>
-    <name>hive_metastore_port</name>
-    <value>9083</value>
-    <description>Hive Metastore port.</description>
-  </property>    
-  <property>
-    <name>hive_lib</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive Library.</description>
-  </property>    
-  <property>
-    <name>hive_dbroot</name>
-    <value>/usr/lib/hive/lib/</value>
-    <description>Hive DB Directory.</description>
-  </property>      
-  <property>
-    <name>hive_conf_dir</name>
-    <value>/etc/hive/conf</value>
-    <description>Hive Conf Dir.</description>
-  </property>
-  <property>
-    <name>hive_log_dir</name>
-    <value>/var/log/hive</value>
-    <description>Directory for Hive Log files.</description>
-  </property>
-  <property>
-    <name>hive_pid_dir</name>
-    <value>/var/run/hive</value>
-    <description>Hive PID Dir.</description>
-  </property>
-  <property>
-    <name>mysql_connector_url</name>
-    <value>${download_url}/mysql-connector-java-5.1.18.zip</value>
-    <description>URL to download the MySQL connector.</description>
-  </property>
-  <property>
-    <name>hive_aux_jars_path</name>
-    <value>/usr/lib/hcatalog/share/hcatalog/hcatalog-core.jar</value>
-    <description>Hive auxiliary jar path.</description>
-  </property>
-  <property>
-    <name>hive_user</name>
-    <value>hive</value>
-    <description>Hive User.</description>
-  </property>
-
-  <!--HCAT-->
-
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index 29ed54e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,236 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used for the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-      Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-      thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-      string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-      The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common
-      join into a mapjoin based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Will the join be automatically converted to a sort-merge join, if the joined tables pass
-      the criteria for sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
-      size. If this parameter is on, and the sum of the sizes of n-1 of the tables/partitions for an n-way join is smaller than the
-      specified size, the join is directly converted to a mapjoin (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of the sizes of n-1 of the tables/partitions for an n-way join is smaller than this size, the join is directly
-      converted to a mapjoin (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two RSs by moving the key/parts/reducer-num of the child RS to the parent RS.
-      That means if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single MR job.
-      The optimization will be disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and if there are map-join jobs followed by a map-reduce
-      job (e.g., a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      Size per reducer. The default is 1G, i.e. if the input size is 10G, it
-      will use 10 reducers.
-    </description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/metainfo.xml
deleted file mode 100644
index 0a0f8fa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,186 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HIVE</name>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <category>MASTER</category>
-          <!-- may be 0 if specifying external metastore, how to specify this? -->
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HIVE/HIVE_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MYSQL_SERVER</name>
-          <category>MASTER</category>
-          <!-- may be 0 if specifying external db, how to specify this? -->
-          <cardinality>1</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-            <co-locate>HIVE/HIVE_SERVER</co-locate>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-client</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>Table and storage management service for Hadoop (HCatalog)</comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-
-    </service>
-
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100644
index 8d31b91..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-myhostname=$(hostname -f)
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-if [ -z "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep "$mysqldbuser")" ]; then
-  echo "Adding user $mysqldbuser@$myhostname";
-  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
-  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
-fi
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100644
index 9e7b33f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
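-# $1 - table name, $2 - action: "prepare" creates the smoke-test table, "cleanup" drops it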
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100644
index 7e03524..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100644
index 051a21e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
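-# $1 - JDBC connect string for HiveServer2, $2 - SQL script executed via beeline's !run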
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startHiveserver2.sh
deleted file mode 100644
index fa90c2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startHiveserver2.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
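-# $1 - stdout log, $2 - stderr log, $3 - file to write the daemon pid to, $4 - hive conf dir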
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
-echo $!|cat>$3
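
The two start scripts in this package share one calling convention: $1 is the stdout log, $2 the stderr log, $3 the file that receives the daemon pid, and $4 the Hive configuration directory; the daemon is backgrounded and its pid captured via echo $!. A hedged sketch of an invocation from Python, with paths and user chosen purely for illustration:

from resource_management import Execute, format

# Illustrative values only; in the stack they come from params.py.
start_script = "/tmp/start_hiveserver2_script"
out_log  = "/var/log/hive/hive-server2.out"
err_log  = "/var/log/hive/hive-server2.log"
pid_file = "/var/run/hive/hive-server.pid"
conf_dir = "/etc/hive/conf.server"

Execute(format("{start_script} {out_log} {err_log} {pid_file} {conf_dir}"),
        user="hive",
        # skip the start when a live process already owns the pid file
        not_if=format("ls {pid_file} >/dev/null 2>&1 && "
                      "ps `cat {pid_file}` >/dev/null 2>&1"))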

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startMetastore.sh
deleted file mode 100644
index 9350776..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/files/startMetastore.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
-echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat.py
deleted file mode 100644
index 2993d3a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hcat():
-  import params
-
-  Directory(params.hcat_conf_dir,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-  Directory(params.hcat_pid_dir,
-            owner=params.webhcat_user,
-            recursive=True
-  )
-
-  hcat_TemplateConfig('hcat-env.sh')
-
-
-def hcat_TemplateConfig(name):
-  import params
-
-  TemplateConfig(format("{hcat_conf_dir}/{name}"),
-                 owner=params.hcat_user,
-                 group=params.user_group
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_client.py
deleted file mode 100644
index 54a8937..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hcat import hcat
-
-class HCatClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    hcat()
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py
deleted file mode 100644
index 5112e99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hcat_service_check.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hcat_service_check():
-    import params
-
-    unique = get_unique_id_and_date()
-    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
-    test_cmd = format("fs -test -e {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-    else:
-      kinit_cmd = ""
-
-    File('/tmp/hcatSmoke.sh',
-         content=StaticFile("hcatSmoke.sh"),
-         mode=0755
-    )
-
-    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
-
-    Execute(prepare_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True)
-
-    ExecuteHadoop(test_cmd,
-                  user=params.hdfs_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir)
-
-    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
-
-    Execute(cleanup_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py
deleted file mode 100644
index b37ebb2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hive(name=None):
-  import params
-
-  if name == 'metastore' or name == 'hiveserver2':
-    hive_config_dir = params.hive_server_conf_dir
-    config_file_mode = 0600
-    jdbc_connector()
-  else:
-    hive_config_dir = params.hive_conf_dir
-    config_file_mode = 0644
-
-  Directory(hive_config_dir,
-            owner=params.hive_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  XmlConfig("hive-site.xml",
-            conf_dir=hive_config_dir,
-            configurations=params.config['configurations']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=config_file_mode
-  )
-
-  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
-               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
-
-  Execute(cmd,
-          not_if=format("[ -f {check_db_connection_jar_name} ]"))
-
-  if name == 'metastore':
-    File(params.start_metastore_path,
-         mode=0755,
-         content=StaticFile('startMetastore.sh')
-    )
-
-  elif name == 'hiveserver2':
-    File(params.start_hiveserver2_path,
-         mode=0755,
-         content=StaticFile('startHiveserver2.sh')
-    )
-
-  if name != "client":
-    crt_directory(params.hive_pid_dir)
-    crt_directory(params.hive_log_dir)
-    crt_directory(params.hive_var_lib)
-
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
-  )
-
-  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
-  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
-  crt_file(format("{hive_conf_dir}/hive-exec-log4j.properties.template"))
-  crt_file(format("{hive_conf_dir}/hive-log4j.properties.template"))
-
-
-def crt_directory(name):
-  import params
-
-  Directory(name,
-            recursive=True,
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-
-
-def crt_file(name):
-  import params
-
-  File(name,
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-
-def jdbc_connector():
-  import params
-
-  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
-    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            path=["/bin", "usr/bin/"])
-
-  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-    cmd = format(
-      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
-      "cp {driver_curl_target} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py
deleted file mode 100644
index 0a5fb2b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import sys
-from resource_management import *
-
-from hive import hive
-
-class HiveClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='client')
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py
deleted file mode 100644
index c741174..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_metastore.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveMetastore(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='metastore')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'metastore',
-                   action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'metastore',
-                   action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    # Check the status of the Hive Metastore process via its pid file
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py
deleted file mode 100644
index 3ad81a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HIVE/package/scripts/hive_server.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveServer(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='hiveserver2')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'hiveserver2',
-                  action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'hiveserver2',
-                  action = 'stop'
-    )
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    # Check the status of the HiveServer2 process via its pid file
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveServer().execute()


[08/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/params.py
deleted file mode 100644
index 95880cb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/params.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-
-hbase_user = config['configurations']['global']['hbase_user']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-user_group = config['configurations']['global']['user_group']
-
-# this is "hadoop-metrics2-hbase.properties" for 2.x stacks
-metric_prop_file_name = "hadoop-metrics.properties" 
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
-
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
-regionserver_xmn_size = functions.calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
-
-pid_dir = status_params.pid_dir
-tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) # if hbase_rs_hosts is not given, region servers are assumed to run on the slave hosts
-
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
-service_check_data = get_unique_id_and_date()
-
-if security_enabled:
-  
-  _use_hostname_in_principal = default('instance_name', True)
-  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
-  _hostname = config['hostname']
-  _kerberos_domain = config['configurations']['global']['kerberos_domain']
-  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
-  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
-  
-  if _use_hostname_in_principal:
-    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
-  else:
-    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
-    
-master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
-regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
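
params.py above derives the region server young-generation size with functions.calc_xmn_from_xms(regionserver_heapsize, 0.2, 512), i.e. roughly 20% of the configured heap, capped at 512 MB. A hedged reimplementation of what that helper plausibly computes; this is illustrative only, the deleted functions.py is not part of this hunk:

def calc_xmn_from_xms(heapsize, xmn_fraction, xmn_max):
    # heapsize may arrive as a string like "1024m"; keep only the megabytes
    megabytes = int(str(heapsize).lower().rstrip("mg"))
    xmn = int(megabytes * xmn_fraction)
    return min(xmn, xmn_max)

print(calc_xmn_from_xms("1024m", 0.2, 512))  # -> 204
print(calc_xmn_from_xms("4096m", 0.2, 512))  # -> 512 (capped)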

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index ff6d0ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    output_file = "/apps/hbase/data/ambarismoketest"
-    test_cmd = format("fs -test -e {output_file}")
-    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
-    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-  
-    File( '/tmp/hbaseSmokeVerify.sh',
-      content = StaticFile("hbaseSmokeVerify.sh"),
-      mode = 0755
-    )
-  
-    File( hbase_servicecheck_file,
-      mode = 0755,
-      content = Template('hbase-smoke.sh.j2')
-    )
-    
-    if params.security_enabled:    
-      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
-      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
-  
-      File( hbase_grant_premissions_file,
-        owner   = params.hbase_user,
-        group   = params.user_group,
-        mode    = 0644,
-        content = Template('hbase_grant_permissions.j2')
-      )
-      
-      Execute( grantprivelegecmd,
-        user = params.hbase_user,
-      )
-
-    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
-  
-    Execute( servicecheckcmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-  
-    Execute ( smokeverifycmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-    
-def main():
-  import sys
-  command_type = 'perform'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseServiceCheck().execute()
-  
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/status_params.py
deleted file mode 100644
index c9b20ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
deleted file mode 100644
index 1c75d15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
deleted file mode 100644
index e971e13..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8656
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8656

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties.j2
deleted file mode 100644
index 1c75d15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hadoop-metrics.properties.j2
+++ /dev/null
@@ -1,50 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is a hardcoded name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-env.sh.j2
deleted file mode 100644
index b8505b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-env.sh.j2
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage its own instance of ZooKeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-smoke.sh.j2
deleted file mode 100644
index 61fe62f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase-smoke.sh.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
-scan 'ambarismoketest'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100644
index 3b3bb18..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,23 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_grant_permissions.j2
deleted file mode 100644
index 9102d35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100644
index 9cf35d3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100644
index bd1d727..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/regionservers.j2
deleted file mode 100644
index b22ae5f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/templates/regionservers.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
\ No newline at end of file
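
regionservers.j2 above simply emits one region server hostname per line from the rs_hosts list computed in params.py. Rendering it directly with Jinja2 shows the output it produces; this is illustrative only, since Ambari renders it through its own Template resource rather than a direct Jinja2 call:

from jinja2 import Template

tmpl = Template("{% for host in rs_hosts %}{{host}}\n{% endfor %}")
print(tmpl.render(rs_hosts=["rs1.example.com", "rs2.example.com"]))
# rs1.example.com
# rs2.example.com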

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index 8c43295..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,253 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec,com.hadoop.compression.lzo.LzoCodec,com.hadoop.compression.lzo.LzopCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-
-  <property>
-    <name>io.compression.codec.lzo.class</name>
-    <value>com.hadoop.compression.lzo.LzoCodec</value>
-    <description>The implementation for lzo codec.</description>
-  </property>
-
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.default.name</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary images to merge.
-        If this is a comma-delimited list of directories then the image is
-        replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.edits.dir</name>
-    <value>${fs.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-        name node should store the temporary edits to merge.
-        If this is a comma-delimited list of directories then the edits are
-        replicated in all of the directories for redundancy.
-        Default value is same as fs.checkpoint.dir
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-  </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-       a periodic checkpoint even if the fs.checkpoint.period hasn't expired.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>webinterface.private.actions</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value></value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value></value>
-<description>The mapping from kerberos principal names to local OS user names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name (excluding the realm) and a pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.@ACME.ORG)s/@.//
-RULE:[2:$1@$0](.@ACME.ORG)s/@.//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE[2:$1%$2@$0](.%admin@APACHE.ORG)s/./admin/
-DEFAULT
-    </description>
-  </property>
-
-<!--
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("hcat_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").groups</name>
-  <value></value>
-  <description>
-     Proxy group for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("oozie_user").hosts</name>
-  <value></value>
-  <description>
-     Proxy host for Hadoop.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").groups</name>
-  <value></value>
-  <description>
-    Proxy group for templeton.
-  </description>
-</property>
-
-<property>
-  <name>hadoop.proxyuser.scope.function_hdp_user("templeton_user").hosts</name>
-  <value></value>
-  <description>
-    Proxy host for templeton.
-  </description>
-</property>
--->
-</configuration>
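
For reference, the deleted core-site.xml above follows the standard Hadoop
<configuration>/<property> layout (name, value, optional description and final
flag). A minimal, illustrative Python sketch of loading such a file into a
dict with the standard library could look like the following; the path in the
commented example is only a placeholder, not something referenced by this commit.

    import xml.etree.ElementTree as ET

    def load_hadoop_config(path):
        # Parse a Hadoop-style *-site.xml into a {name: value} dict.
        props = {}
        for prop in ET.parse(path).getroot().findall("property"):
            name = prop.findtext("name")
            if name:
                props[name] = prop.findtext("value") or ""
        return props

    # Example (placeholder path):
    # conf = load_hadoop_config("/etc/hadoop/conf/core-site.xml")
    # print(conf.get("fs.default.name"))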

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/global.xml
deleted file mode 100644
index 04d51db..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,187 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode.</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_include</name>
-    <value></value>
-    <description>HDFS Include hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>User allowed to perform short-circuit (block local path access) reads.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-
-    <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-    <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 900da99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.submission.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communciate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.task.umbilical.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value></value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value></value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 1fc6c59..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,476 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>to enable webhdfs</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.socket.write.timeout</name>
-    <value>0</value>
-    <description>DFS Client write socket timeout</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>#of failed disks dn would tolerate</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.block.local-path-access.user</name>
-    <value>hbase</value>
-    <description>the user who is allowed to perform short
-      circuit reads.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <property>
-    <name>dfs.hosts</name>
-    <value>/etc/hadoop/conf/dfs.include</value>
-    <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.safemode.threshold.pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in terms of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50070</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-    <description>
-      The datanode port for data transfer. This property is effective only if referenced from dfs.datanode.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
-    <description>
-      The datanode http port. This property is effective only if referenced from dfs.datanode.http.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.block.size</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.http.address</name>
-    <value>localhost:50070</value>
-    <description>The address and port where the NameNode web UI listens.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.pct</name>
-    <value>0.85f</value>
-    <description>When calculating remaining space, only use this percentage of the real available space
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.xcievers</name>
-    <value>4096</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>dfs.umaskmode</name>
-    <value>077</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.ugi</name>
-    <!-- cluster variant -->
-    <value>gopher,gopher</value>
-    <description>The user account used by the web interface.
-      Syntax: USERNAME,GROUP1,GROUP2, ...
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.supergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>ipc.server.max.response.size</name>
-    <value>5242880</value>
-  </property>
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.secondary.http.address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.https.port</name>
-    <value>50490</value>
-    <description>The https port where secondary-namenode binds</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>The https port where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.https.address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be there on dfs.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.access.time.precision</name>
-    <value>0</value>
-    <description>The access time for an HDFS file is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for users and groups that can view the default servlets in HDFS</description>
-  </property>
-
-  <property>
-    <name>ipc.server.read.threadpool.size</name>
-    <value>5</value>
-    <description></description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description>Number of failed disks datanode would tolerate</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes is greater
-      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-  </property>
-
-</configuration>
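
The hdfs-site.xml above relies on Hadoop-style variable expansion:
dfs.datanode.address is set to 0.0.0.0:${ambari.dfs.datanode.port}, so the
effective port is taken from the ambari.dfs.datanode.port property (and
likewise for the HTTP address). A rough, self-contained Python sketch of that
substitution, for illustration only:

    import re

    def expand(props, value, depth=0):
        # Resolve ${name} references against other properties (bounded depth).
        if depth > 20:
            return value
        expanded = re.sub(r"\$\{([^}]+)\}",
                          lambda m: props.get(m.group(1), m.group(0)),
                          value)
        return expanded if expanded == value else expand(props, expanded, depth + 1)

    props = {
        "ambari.dfs.datanode.port": "50010",
        "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
    }
    print(expand(props, props["dfs.datanode.address"]))  # prints 0.0.0.0:50010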

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
deleted file mode 100644
index 009acae..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,146 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>1.2.0.1.3.3.0</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-libhdfs</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-pipes</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-sbin</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ambari-log4j</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-policy</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-
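
The checkForFormat.sh script above takes the HDFS user, the Hadoop config
directory, the formatted-marker directory, and one or more NameNode name
directories as positional arguments. A hedged example of invoking it from
Python follows; all argument values are hypothetical and would normally come
from the stack's params.

    import subprocess

    rc = subprocess.call([
        "sh", "checkForFormat.sh",
        "hdfs",                                      # hdfs_user
        "/etc/hadoop/conf",                          # conf_dir
        "/var/run/hadoop/hdfs/namenode/formatted/",  # mark_dir
        "/hadoop/hdfs/namenode",                     # name_dirs
    ])
    print("checkForFormat exit code: %d" % rc)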

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()
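
checkWebUI.py above targets Python 2 (httplib and the print statement) and is
invoked with a comma-separated host list and a port, for example
"python checkWebUI.py -m host1,host2 -p 50070". A rough Python 3 equivalent of
the core reachability check, shown only for reference:

    import http.client

    def web_ui_reachable(host, port):
        # True if GET / on host:port answers with HTTP 200.
        try:
            conn = http.client.HTTPConnection(host, int(port), timeout=10)
            conn.request("GET", "/")
            status = conn.getresponse().status
            conn.close()
        except Exception:
            return False
        return status == 200

    # Example with placeholder values:
    # print(web_ui_reachable("localhost", 50070))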

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index eaa27cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def config(self, env):
-    import params
-
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index ec24c7d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def config(self, env):
-    import params
-
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index aa7b5e6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,59 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-import os
-
-def datanode(action=None):
-  import params
-
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0750,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    Directory(os.path.dirname(params.dfs_data_dir),
-              recursive=True,
-              mode=0755)
-    Directory(params.dfs_data_dir,
-              recursive=False,
-              mode=0750,
-              owner=params.hdfs_user,
-              group=params.user_group)
-
-  if action == "start":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )
-  if action == "stop":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )


[10/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/log4j.properties.j2
deleted file mode 100644
index 577ad04..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/log4j.properties.j2
+++ /dev/null
@@ -1,200 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-{% if is_jtnode_master or is_rmnode_master %}
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-
-log4j.appender.JSA.File={{hdfs_log_dir_prefix}}/{{mapred_user}}/${hadoop.mapreduce.jobsummary.log.file}
-
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
-log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
-{% endif %}
-
-{{rca_prefix}}ambari.jobhistory.database={{ambari_db_rca_url}}
-{{rca_prefix}}ambari.jobhistory.driver={{ambari_db_rca_driver}}
-{{rca_prefix}}ambari.jobhistory.user={{ambari_db_rca_username}}
-{{rca_prefix}}ambari.jobhistory.password={{ambari_db_rca_password}}
-{{rca_prefix}}ambari.jobhistory.logger=DEBUG,JHA
-
-{{rca_prefix}}log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-{{rca_prefix}}log4j.appender.JHA.database=${ambari.jobhistory.database}
-{{rca_prefix}}log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-{{rca_prefix}}log4j.appender.JHA.user=${ambari.jobhistory.user}
-{{rca_prefix}}log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-{{rca_prefix}}log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-{{rca_prefix}}log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/slaves.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/slaves.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}
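The slaves.j2 template above simply emits one line per entry in slave_hosts. As a sketch only, and not part of this patch, the same rendering can be reproduced with plain Jinja2 using a made-up host list; in the stack the values are supplied by the agent at runtime:

    #!/usr/bin/env python
    # Illustrative only: renders a slaves.j2-style template with a hypothetical host list.
    from jinja2 import Template

    SLAVES_TEMPLATE = "{% for host in slave_hosts %}\n{{host}}\n{% endfor %}\n"

    # Hypothetical inventory; Ambari fills slave_hosts in from cluster configuration.
    slave_hosts = ["slave1.example.com", "slave2.example.com"]

    # trim_blocks drops the newline left behind by each block tag, giving one host per line.
    print(Template(SLAVES_TEMPLATE, trim_blocks=True).render(slave_hosts=slave_hosts))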

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/snmpd.conf.j2
deleted file mode 100644
index 3530444..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/snmpd.conf.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
-group   notConfigGroup v1           notConfigUser
-group   notConfigGroup v2c           notConfigUser
-view    systemview    included   .1
-access  notConfigGroup ""      any       noauth    exact  systemview none none
-
-syslocation Hadoop 
-syscontact HadoopMaster 
-dontLogTCPWrappersConnects yes
-
-###############################################################################
-# disk checks
-
-disk / 10000
-
-
-###############################################################################
-# load average checks
-#
-
-# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
-#
-# 1MAX:   If the 1 minute load average is above this limit at query
-#         time, the errorFlag will be set.
-# 5MAX:   Similar, but for 5 min average.
-# 15MAX:  Similar, but for 15 min average.
-
-# Check for loads:
-#load 12 14 14
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/taskcontroller.cfg.j2
deleted file mode 100644
index d01d37e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/hooks/before-START/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/metainfo.xml
deleted file mode 100644
index ca45822..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>true</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/repos/repoinfo.xml
deleted file mode 100644
index 444bc22..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/1.x/updates/1.3.3.0</baseurl>
-      <repoid>HDP-1.3.4</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>
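For illustration only, not something the stack ships: a repoinfo.xml with the structure shown above can be summarised per OS with a few lines of ElementTree. Note that the repoid stays HDP-1.3.4 while every baseurl points at the 1.3.3.0 bits, which is the point of this change.

    #!/usr/bin/env python
    # Illustrative only: prints os type -> repoid / baseurl pairs from a repoinfo.xml.
    import xml.etree.ElementTree as ET

    def list_repos(path):
        root = ET.parse(path).getroot()              # <reposinfo>
        for os_el in root.findall("os"):
            for repo in os_el.findall("repo"):
                print("%-14s %-10s %s" % (os_el.get("type"),
                                           repo.findtext("repoid"),
                                           repo.findtext("baseurl")))

    if __name__ == "__main__":
        list_repos("repoinfo.xml")                   # placeholder path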

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/configuration/global.xml
deleted file mode 100644
index f1fa4de..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/configuration/global.xml
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/metainfo.xml
deleted file mode 100644
index bebb54e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/FLUME/metainfo.xml
+++ /dev/null
@@ -1,31 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>Flume is a distributed, reliable, and available system for efficiently collecting, aggregating and moving large amounts of log data from many different sources to a centralized data store.</comment>
-    <version>1.3.1.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>FLUME_SERVER</name>
-            <category>MASTER</category>
-            <cardinality>1</cardinality>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/configuration/global.xml
deleted file mode 100644
index 16df0b8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/configuration/global.xml
+++ /dev/null
@@ -1,55 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>ganglia_conf_dir</name>
-    <value>/etc/ganglia/hdp</value>
-    <description>Config directory for Ganglia</description>
-  </property>
-  <property>
-    <name>ganglia_runtime_dir</name>
-    <value>/var/run/ganglia/hdp</value>
-    <description>Run directories for Ganglia</description>
-  </property>
-  <property>
-    <name>gmetad_user</name>
-    <value>nobody</value>
-    <description>User the gmetad daemon runs as</description>
-  </property>
-  <property>
-    <name>gmond_user</name>
-    <value>nobody</value>
-    <description>User the gmond daemon runs as</description>
-  </property>
-  <property>
-    <name>rrdcached_base_dir</name>
-    <value>/var/lib/ganglia/rrds</value>
-    <description>Default directory for saving the rrd files on ganglia server</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/metainfo.xml
deleted file mode 100644
index 09d78a6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>GANGLIA</name>
-      <comment>Ganglia Metrics Collection system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>GANGLIA_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <commandScript>
-            <script>scripts/ganglia_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>GANGLIA_MONITOR</name>
-          <category>SLAVE</category>
-          <cardinality>ALL</cardinality>
-          <auto-deploy>
-            <enabled>true</enabled>
-          </auto-deploy>
-          <commandScript>
-            <script>scripts/ganglia_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>libganglia-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmetad-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-web-3.5.7-99.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>python-rrdtool.x86_64</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-modules-python-3.5.0-99</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>apache2</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>apache2-mod_php5</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>httpd</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>
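A small sketch, again for illustration rather than shipped code: walking a schemaVersion 2.0 metainfo.xml like the one above to list each component with its category, cardinality and command script.

    #!/usr/bin/env python
    # Illustrative only: summarises components declared in a schemaVersion 2.0 metainfo.xml.
    import xml.etree.ElementTree as ET

    def list_components(path):
        root = ET.parse(path).getroot()                          # <metainfo>
        for service in root.findall("services/service"):
            print("service %s (version %s)" % (service.findtext("name"),
                                                service.findtext("version")))
            for comp in service.findall("components/component"):
                print("  %-16s category=%-6s cardinality=%-4s script=%s" % (
                    comp.findtext("name"),
                    comp.findtext("category"),
                    comp.findtext("cardinality") or "-",
                    comp.findtext("commandScript/script") or "-"))

    if __name__ == "__main__":
        list_components("metainfo.xml")                          # placeholder path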

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmetad.sh
deleted file mode 100644
index e60eb31..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi
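checkGmetad.sh, checkGmond.sh and checkRrdcached.sh all follow the same pattern: read the recorded PID from the runtime directory and confirm the process still exists. A minimal Python equivalent of that liveness test, as a sketch only; the pid file path is the assumed default built from ganglia_runtime_dir:

    #!/usr/bin/env python
    # Illustrative only: the same liveness test the shell helpers perform with `ps`.
    import os

    def running_pid(pid_file):
        """Return the PID recorded in pid_file if that process exists, else None."""
        if not os.path.isfile(pid_file):
            return None
        data = open(pid_file).read().strip()
        if not data.isdigit():
            return None
        pid = int(data)
        try:
            os.kill(pid, 0)            # signal 0 only checks for existence
        except OSError:
            return None
        return pid

    if __name__ == "__main__":
        # Assumed default: ganglia_runtime_dir from global.xml plus gmetad.pid from gmetadLib.sh.
        pid = running_pid("/var/run/ganglia/hdp/gmetad.pid")
        print("gmetad running with PID %s" % pid if pid else "gmetad is not running")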

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmond.sh
deleted file mode 100644
index 0cec8dc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkRrdcached.sh
deleted file mode 100644
index d94db5d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetad.init
deleted file mode 100644
index 20b388e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetad.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetadLib.sh
deleted file mode 100644
index e28610e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmetadLib.sh
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep the daemon in the foreground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port address2:port ...
-# 
-# The keyword 'data_source' must be immediately followed by a unique
-# string which identifies the source, then an optional polling interval in
-# seconds. The source will be polled at this interval on average.
-# If the polling interval is omitted, 15sec is assumed.
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-# rrd_rootdir "/some/other/place"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a case
-# sensitive manner
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}
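The interesting part of generateGmetadConf above is the loop that turns each configured cluster into a data_source line for gmetad.conf. The same transformation, sketched in Python with a hypothetical cluster list standing in for getGangliaClusterInfo:

    #!/usr/bin/env python
    # Illustrative only: the data_source lines generateGmetadConf writes into gmetad.conf.

    # Hypothetical cluster info; the shell library obtains this from getGangliaClusterInfo.
    clusters = [
        ("HDPNameNode", "master.example.com", 8661),
        ("HDPSlaves",   "master.example.com", 8660),
    ]

    def data_source_lines(clusters):
        return ['data_source "%s" %s:%d' % (name, host, port)
                for (name, host, port) in clusters]

    for line in data_source_lines(clusters):
        print(line)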

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmond.init
deleted file mode 100644
index afb7026..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmond.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmondLib.sh
deleted file mode 100644
index 87da4dd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/gmondLib.sh
+++ /dev/null
@@ -1,545 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = localhost
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host's total memory every
-   180 secs.
-   This information doesn't change between reboots and is only collected
-   once. It is needed for the heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In practice, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-/* The gmond cluster master must additionally provide an XML 
- * description of the cluster to the gmetad that will query it.
- */
-tcp_accept_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrd.py
deleted file mode 100644
index 3fe6901..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrd.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-import os
-import rrdtool
-import sys
-import time
-import re
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end,
-                resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
-    value = None
-    idx = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx -= 1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-
-def stripList(l):
-  return ([x.strip() for x in l])
-
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-
-def _walk(*args, **kwargs):
-  for root, dirs, files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root, dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    # Process only paths that contain files. If no host parameter is passed, process all host folders and the summary info.
-    # If a host parameter is passed, process only that host's folder.
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          #Exact name of metric
-          printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                      os.path.join(path, file), cf, start, end, resolution,
-                      pointInTime)
-        else:
-          #Regex as metric name
-          metricRegex = metric + '\.rrd$'
-          p = re.compile(metricRegex)
-          matchedFiles = filter(p.match, files)
-          for matchedFile in matchedFiles:
-            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
-                        os.path.join(path, matchedFile), cf, start, end,
-                        resolution, pointInTime)
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()
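
For reference only: once placed under /var/www/cgi-bin on the Ganglia collector, this script is driven entirely by the query parameters parsed above (m, h, c, p, s, e, r, cf, pt). A hypothetical point-in-time request might look like:

    curl 'http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=load_one&pt=true'

The collector hostname, cluster, host, and metric names here are placeholders.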

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrdcachedLib.sh
deleted file mode 100644
index 8b7c257..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/setupGanglia.sh
deleted file mode 100644
index 5145b9c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi
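
For reference only: going by the usage() text above, hypothetical invocations would look roughly like this (cluster name, owner, and group are placeholders):

    ./setupGanglia.sh -c HDPSlaves -m -o root -g root    # master gmond config for one cluster
    ./setupGanglia.sh -c HDPSlaves -o root -g root       # slave gmond config for the same cluster
    ./setupGanglia.sh -t -o root -g root                 # gmetad config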

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmetad.sh
deleted file mode 100644
index ab5102d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmetad.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmond.sh
deleted file mode 100644
index 239b62e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startGmond.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startRrdcached.sh
deleted file mode 100644
index e79472b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/startRrdcached.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    # Changed because of a problem Puppet had with the 'nobody' user.
-    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-    #         -b /var/lib/ganglia/rrds -B
-    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
-    # this, but it sometimes fails to take effect due to a lack of permissions,
-    # so perform the operation explicitly to be safe.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmetad.sh
deleted file mode 100644
index 2764e0e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi


http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
deleted file mode 100644
index 7be5a7c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/params.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/hadoop/conf"
-
-mapred_user = status_params.mapred_user
-pid_dir_prefix = status_params.pid_dir_prefix
-mapred_pid_dir = status_params.mapred_pid_dir
-
-historyserver_pid_file = status_params.historyserver_pid_file
-jobtracker_pid_file = status_params.jobtracker_pid_file
-tasktracker_pid_file = status_params.tasktracker_pid_file
-
-hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
-hadoop_bin = "/usr/lib/hadoop/bin"
-user_group = config['configurations']['global']['user_group']
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-mapred_log_dir = format("{hdfs_log_dir_prefix}/{mapred_user}")
-mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
-
-hadoop_jar_location = "/usr/lib/hadoop/"
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-#exclude file
-mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
-exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
-mapred_hosts_file_path = config['configurations']['mapred-site']['mapred.hosts']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service.py
deleted file mode 100644
index f4aa91b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-
-def service(
-    name,
-    action='start'):
-
-  import params
-
-  pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-{name}.pid")
-  hadoop_daemon = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {conf_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{cmd} start {name}")
-    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            user=params.mapred_user,
-            not_if=no_op
-    )
-
-    Execute(no_op,
-            user=params.mapred_user,
-            not_if=no_op,
-            initial_wait=5
-    )
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {name}")
-    rm_pid =  format("rm -f {pid_file}")
-
-    Execute(daemon_cmd,
-            user=params.mapred_user
-    )
-    Execute(rm_pid)
\ No newline at end of file
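
For reference only: with the default paths from params.py, the start branch above expands to roughly the following command, run as the mapred user (tasktracker is just an example daemon name):

    export HADOOP_LIBEXEC_DIR=/usr/lib/hadoop/libexec && \
      /usr/lib/hadoop/bin/hadoop-daemon.sh --config /etc/hadoop/conf start tasktracker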

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service_check.py
deleted file mode 100644
index c0a4a59..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_location = params.hadoop_jar_location
-    input_file = 'mapredsmokeinput'
-    output_file = "mapredsmokeoutput"
-
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    create_file_cmd = format("{cleanup_cmd} ; hadoop dfs -put /etc/passwd {input_file}")
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_location}/hadoop-examples.jar wordcount {input_file} {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-
-      Execute(kinit_cmd,
-              user=params.smokeuser
-      )
-
-    ExecuteHadoop(create_file_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.conf_dir
-    )
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.conf_dir,
-                  logoutput=True
-    )
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  conf_dir=params.conf_dir
-    )
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
\ No newline at end of file
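
For reference only: run by hand as the smoke user, the check above amounts to roughly these commands (jar path and file names come from the script itself; the user is a placeholder):

    hadoop dfs -rmr mapredsmokeoutput mapredsmokeinput
    hadoop dfs -put /etc/passwd mapredsmokeinput
    hadoop jar /usr/lib/hadoop/hadoop-examples.jar wordcount mapredsmokeinput mapredsmokeoutput
    hadoop fs -test -e mapredsmokeoutput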

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/status_params.py
deleted file mode 100644
index f964a76..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/status_params.py
+++ /dev/null
@@ -1,33 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-mapred_user = config['configurations']['global']['mapred_user']
-pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}")
-
-jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid")
-tasktracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-tasktracker.pid")
-historyserver_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-historyserver.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/tasktracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/tasktracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/tasktracker.py
deleted file mode 100644
index 77d974b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/scripts/tasktracker.py
+++ /dev/null
@@ -1,104 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from mapreduce import mapreduce
-from service import service
-
-class Tasktracker(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    mapreduce()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('tasktracker',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('tasktracker',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.tasktracker_pid_file)
-
-if __name__ == "__main__":
-  Tasktracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index 02fc5fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in mr_exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/configuration/global.xml
deleted file mode 100644
index 61a2b90..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/configuration/global.xml
+++ /dev/null
@@ -1,50 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>nagios_user</name>
-    <value>nagios</value>
-    <description>Nagios Username.</description>
-  </property>
-  <property>
-    <name>nagios_group</name>
-    <value>nagios</value>
-    <description>Nagios Group.</description>
-  </property>
-  <property>
-    <name>nagios_web_login</name>
-    <value>nagiosadmin</value>
-    <description>Nagios web user.</description>
-  </property>
-  <property>
-    <name>nagios_web_password</name>
-    <value></value>
-    <description>Nagios Admin Password.</description>
-  </property>
-  <property>
-    <name>nagios_contact</name>
-    <value></value>
-    <description>Hadoop Admin Email.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml
deleted file mode 100644
index a4c500d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,106 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-            <cardinality>1</cardinality>
-            <commandScript>
-              <script>scripts/nagios_server.py</script>
-              <scriptType>PYTHON</scriptType>
-              <timeout>600</timeout>
-            </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>perl</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>fping</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php5-json</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>redhat5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>oraclelinux5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>php-pecl-json.x86_64</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index f4063fb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,243 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>
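
For reference only: per the usage() function above, a hypothetical invocation checking an aggregated service state might be:

    ./check_aggregate.php -f /var/nagios/status.dat -t service -n 'DATANODE::DataNode process' -s 1 -w 10% -c 30%

The status file path, service description, status code, and thresholds are placeholders.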

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;
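
For context, a Nagios command definition would invoke this plugin roughly as follows; the hostname, SNMP community string and thresholds below are illustrative, not values taken from this commit:

  ./check_cpu.pl -H dn01.example.com -C public -w 90 -c 95
  # prints something like "4 CPU, average load 12.5% < 90% : OK"
  # and exits 0 (OK), 1 (WARNING), 2 (CRITICAL) or 3 (UNKNOWN), per Nagios convention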

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks the storage capacity remaining on the local datanode storage
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }  
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
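
A hypothetical invocation of this plugin, assuming an unsecured, non-SSL cluster; the host, DataNode HTTP port, thresholds and keytab paths are illustrative placeholders:

  php ./check_datanode_storage.php -h dn01.example.com -p 50075 -w 75% -c 90% \
      -e false -s false -k /etc/security/keytabs/nagios.service.keytab \
      -r nagios@EXAMPLE.COM -t /usr/bin/kinit
  # prints OK/WARNING/CRITICAL with Capacity, Remaining Capacity and percent_full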

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 19347b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the corrupt or missing blocks % exceeds the threshold
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $c_percent = 0;
-    $object = $json_array['beans'][0];
-    $missing_blocks = $object['MissingBlocks'];
-    $corrupt_blocks = $object['CorruptBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    if($total_blocks == 0) {
-      $m_percent = 0;
-      $c_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      $c_percent = ($corrupt_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
-             ">, missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>
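
A sketch of how this plugin would be called, assuming security and SSL are off; the host, port and bean name are illustrative (the stack passes the real NameNode bean name in -s, and -u carries the security flag):

  php ./check_hdfs_blocks.php -h nn01.example.com -p 50070 -w 0% -c 1% \
      -s FSNamesystemMetrics -u false -e false \
      -k /etc/security/keytabs/nagios.service.keytab -r nagios@EXAMPLE.COM -t /usr/bin/kinit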

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node, gets the jmx-json document,
- * and checks whether the % HDFS capacity used is >= the warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 15c85eb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0
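
A minimal example call with illustrative directories and threshold; the first argument is a comma-separated list of mapred.local.dir paths and the second is the critical usage percentage:

  ./check_mapred_local_dir_used.sh /grid/0/hadoop/mapred,/grid/1/hadoop/mapred 85%
  # exits 0 if every listed filesystem is below 85% used, 2 otherwise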

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the namenode, gets the jmx-json document,
- * and checks the NameDirStatuses to find any offline (failed) directories
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 50b075a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2
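
An illustrative call against a two-NameNode HA pair; the hostnames and HTTP port below are placeholders:

  ./check_namenodes_ha.sh nn01.example.com,nn02.example.com 50070
  # OK: NameNode HA healthy; Active<nn01.example.com>, Standby<nn02.example.com>, Unavailable<>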

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index 020b41d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes a call to the master node and gets the jmx-json document.
- * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 7fbc4c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index b23045e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  curl $url -o /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-port=$3
-
-if [[ -z "$service" || -z "$host" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
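
Illustrative calls for two of the supported services; the hosts and ports are placeholders, not values from this commit:

  ./check_webui.sh namenode nn01.example.com 50070
  ./check_webui.sh resourcemanager rm01.example.com 8088
  # exits 0 when the web UI responds, 1 when it is unreachable, 3 on bad arguments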

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Function for kinit. Checks whether security is enabled and whether klist already shows a ticket
- * for this principal; if security is enabled and no ticket is present, makes the kinit call.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks if user is logined on kerberos
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file
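
The kinit_if_needed/is_logined/kinit helpers above boil down to the following shell logic, shown here as a rough sketch with placeholder variables:

  if [ "$SECURITY_ENABLED" = "true" ]; then
    # only kinit when klist does not already show a ticket for the principal
    klist | grep -q "$PRINCIPAL" || "$KINIT_PATH" -kt "$KEYTAB_PATH" "$PRINCIPAL"
  fi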

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 964225e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management.libraries.script.config_dictionary import UnknownConfiguration
-
-def get_port_from_url(address):
-  if not is_empty(address):
-    return address.split(':')[-1]
-  else:
-    return address
-  
-def is_empty(var):
-  return isinstance(var, UnknownConfiguration)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index af09e87..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-
-  # enable snmpd
-  Execute( "service snmpd start; chkconfig snmpd on",
-    path = "/usr/local/bin/:/bin/:/sbin/"
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permisssions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-  
-  
-def set_web_permisssions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
-  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
-  Execute( cmd,
-    not_if = test
-  )
-
-  File( "/etc/nagios/htpasswd.users",
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().platform == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  else:
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute( command)
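
With the format() strings above expanded, set_web_permisssions effectively runs something like the following; the htpasswd path and credentials are illustrative, the real values come from params:

  # Execute's not_if means the htpasswd command is skipped when the login already exists
  grep nagiosadmin /etc/nagios/htpasswd.users || \
    /usr/bin/htpasswd -c -b /etc/nagios/htpasswd.users nagiosadmin 'secret'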

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index 02685c7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    self.configure(env) # done for updating configs after Security enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-    
-def remove_conflicting_packages():  
-  Package( 'hdp_mon_nagios_addons',
-    action = "remove"
-  )
-
-  Package( 'nagios-plugins',
-    action = "remove"
-  )
-
-  Execute( "rpm -e --allmatches --nopostun nagios",
-    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    ignore_failures = True 
-  )
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
-  stroutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
-  
-  NagiosServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  NagiosServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index b3e639c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().platform != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d/', 
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )
\ No newline at end of file
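
For reference, the owner/group/config_dir fall-back idiom used by
nagios_server_configfile() above, rewritten in plain Python with example
values standing in for the params module (the real defaults come from
params.nagios_user, params.user_group and params.nagios_obj_dir):

    def configfile(name, owner=None, group=None, config_dir=None, mode=None):
        owner = owner or "nagios"                          # params.nagios_user
        group = group or "hadoop"                          # params.user_group
        config_dir = config_dir or "/etc/nagios/objects"   # params.nagios_obj_dir
        print "%s/%s -> %s:%s mode=%s" % (config_dir, name, owner, group, mode)

    configfile("nagios.cfg", config_dir="/etc/nagios", group="nagios")
    configfile("hadoop-hosts.cfg")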


[04/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index cc411b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_service(action='start'): # start or stop
-  import params
-
-  if action == 'start':
-   command = "service nagios start"
-  elif action == 'stop':
-   command = format("service nagios stop && rm -f {nagios_pid_file}")
-
-  Execute( command,
-     path    = "/usr/local/bin/:/bin/:/sbin/"      
-  )
-  MonitorWebserver("restart")
\ No newline at end of file
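
A rough plain-Python equivalent of the command assembly in nagios_service()
above; format() in resource_management interpolates {nagios_pid_file} from the
params/status_params modules, and the pid file path below is simply the value
status_params.py in this same change defines:

    nagios_pid_file = "/var/run/nagios/nagios.pid"

    def nagios_command(action):
        if action == "start":
            return "service nagios start"
        elif action == "stop":
            return "service nagios stop && rm -f {0}".format(nagios_pid_file)

    print nagios_command("stop")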

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index 8694dff..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,168 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from functions import get_port_from_url
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/nagios"
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"
-plugins_dir = "/usr/lib64/nagios/plugins"
-nagios_obj_dir = "/etc/nagios/objects"
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
-hadoop_ssl_enabled = False
-
-namenode_metadata_port = "8020"
-oozie_server_port = "11000"
-# different to HDP2    
-namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.http.address'])
-# different to HDP2  
-snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.secondary.http.address"])
-
-hbase_master_rpc_port = "60000"
-rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
-nm_port = "8042"
-hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
-journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
-flume_port = "4159"
-hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_rs_port = "60030"
-
-# these 4 are different for HDP2
-jtnode_port = get_port_from_url(config['configurations']['mapred-site']['mapred.job.tracker.http.address'])
-jobhistory_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
-tasktracker_port = "50060"
-mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
-
-# this is different for HDP2
-nn_metrics_property = "FSNamesystemMetrics"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
-
-
-java64_home = config['hostLevelParams']['java_home']
-security_enabled = config['configurations']['global']['security_enabled']
-
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().platform == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-else:
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
-
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-# can differ on HDP2
-_mapred_tt_hosts = _slave_hosts
-#if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    # not in HDP2
-    'tasktracker-servers' : _mapred_tt_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'resourcemanager' : _rm_host,
-    'nodemanagers' : _nm_hosts,
-    'historyserver2' : _hs_host,
-    'journalnodes' : _journalnode_hosts
-}
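
get_port_from_url() above comes from the local functions module that is not
part of this hunk; the following is a hedged sketch of the behaviour params.py
appears to rely on (pulling the port out of values such as "host:50070" or a
full URL), not the actual helper:

    from urlparse import urlparse

    def get_port_from_url(address):
        if "://" not in address:
            address = "http://" + address
        return str(urlparse(address).port)

    print get_port_from_url("c6401.ambari.apache.org:50070")   # 50070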

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 33b35fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")
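
As the usage above suggests, format() in resource_management interpolates
{names} from variables in the caller's scope; a plain str.format equivalent of
the two values defined in status_params.py:

    nagios_pid_dir = "/var/run/nagios"
    nagios_pid_file = "{0}/nagios.pid".format(nagios_pid_dir)
    print nagios_pid_file    # /var/run/nagios/nagios.pid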

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 9dada51..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}
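
The {{nagios_web_login}} and {{nagios_contact}} placeholders above are filled
from the params module when the template is rendered; a small stand-alone
illustration with made-up values:

    from jinja2 import Template

    snippet = Template("contact_name    {{nagios_web_login}}\n"
                       "email           {{nagios_contact}}")
    print snippet.render(nagios_web_login="nagiosadmin",
                         nagios_contact="admin@example.com")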

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index e47a09e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,114 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if env.system.platform != "suse" %}
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
-       }
-{% endif %}
-
-# Check data node storage full 
-define command {
-        command_name    check_datanode_storage
-        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
\ No newline at end of file
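
Nagios splits a service's check_command on "!" into the command name followed
by $ARG1$..$ARGn$, which is how the command definitions above receive their
thresholds; a plain-Python illustration of that mapping for one command used in
the services template later in this change:

    check_command = 'check_aggregate!"DATANODE::DataNode space"!10%!30%'
    parts = check_command.split("!")
    print parts[0]    # check_aggregate
    print parts[1:]   # ['"DATANODE::DataNode space"', '10%', '30%'] -> $ARG1$..$ARG3$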

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index d24e5cd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file
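
A short sketch of what the hostgroup loop above produces for a toy
hostgroup_defs mapping; groups whose host list is empty or None are skipped by
the {% if hosts %} guard (hostgroup_defs itself is built in params.py earlier
in this change). The host names below are made up:

    from jinja2 import Template

    tmpl = Template(
        "{% for name, hosts in hostgroup_defs.iteritems() %}"
        "{% if hosts %}hostgroup {{name}}: {{','.join(hosts)}}\n{% endif %}"
        "{% endfor %}")
    print tmpl.render(hostgroup_defs={"namenode": ["nn1.example.com"],
                                      "hue-server": None})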

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index 778e4f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% for host in all_hosts %}
-define host {
-        alias        {{host}}
-        host_name    {{host}}
-        use          linux-server
-        address      {{host}}
-        check_interval         0.25
-        retry_interval         0.25
-        max_check_attempts     4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index 233051f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,98 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-{% if hostgroup_defs['namenode'] or
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-define servicegroup {
-  servicegroup_name  HDFS
-  alias  HDFS Checks
-}
-{% endif %}
-{%if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2']-%}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-{%if hostgroup_defs['resourcemanager'] or
-  hostgroup_defs['nodemanagers'] %}
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-{% endif %}
-{%if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}
-{%if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] %}
-define servicegroup {
-  servicegroup_name  HIVE-METASTORE
-  alias  HIVE-METASTORE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index d3e5e24..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,714 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name	        {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['jobtracker'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for JobTracker
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% endif %}
-
-{% if hostgroup_defs['snamenode'] %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['namenode'] %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if env.system.platform != "suse" %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   10
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-{% endif %}
-
-# MAPREDUCE Checks
-{% if hostgroup_defs['jobtracker'] %}
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobtracker!{{ jtnode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!jobhistory!{{ jobhistory_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% if env.system.platform != "suse" %}
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     JOBTRACKER::JobTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ jtnode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          jobtracker
-        use                     hadoop-service
-        service_description     MAPREDUCE::JobTracker RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobTracker!{{ jtnode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-{% if hostgroup_defs['tasktracker-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     MAPREDUCE::Percent TaskTrackers live
-        servicegroups           MAPREDUCE
-        check_command           check_aggregate!"TASKTRACKER::TaskTracker process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER Checks 
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     TASKTRACKER::TaskTracker process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ tasktracker_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-# MAPREDUCE::TASKTRACKER Mapreduce local dir used space
-define service {
-        hostgroup_name          tasktracker-servers
-        use                     hadoop-service
-        service_description     ::MapReduce local dir space
-        servicegroups           MAPREDUCE
-        check_command           check_mapred_local_dir_used_space!{{ mapred_local_dir }}!85%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-# YARN::RESOURCEMANAGER Checks 
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui!resourcemanager!{{ rm_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.platform != "suse" %}
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['nodemanagers'] %}
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!historyserver2!{{ hs_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.platform != "suse" %}
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{%  endif %}
-
-{% if hostgroup_defs['journalnodes'] %}
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['slaves'] %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{# HBASE:: MASTER Checks
-# define service {
-#         hostgroup_name          hbasemasters
-#         use                     hadoop-service
-#         service_description     HBASEMASTER::HBase Master Web UI
-#         servicegroups           HBASE
-#         check_command           check_webui!hbase!{{ hbase_master_port }}
-#         normal_check_interval   1
-#         retry_check_interval    1
-#         max_check_attempts      3
-# #}
-{%  for hbasemaster in hbase_master_hosts  %}
-{% if env.system.platform != "suse" %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore status
-        servicegroups           HIVE-METASTORE
-        {% if security_enabled %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
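Most of the process checks defined above delegate to check_tcp, i.e. they only verify that the daemon's port accepts a connection within a short timeout. A minimal stand-alone sketch of that kind of probe in Python; the host, port and exit codes here are illustrative rather than taken from the template:

import socket
import sys

def check_tcp(host, port, timeout=1.0):
    # Nagios-style exit codes: 0 = OK, 2 = CRITICAL.
    try:
        sock = socket.create_connection((host, port), timeout)
        sock.close()
        return 0
    except (socket.error, socket.timeout):
        return 2

if __name__ == "__main__":
    # Illustrative: probe a local HBase RegionServer RPC port.
    sys.exit(check_tcp("localhost", 60020))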


[02/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_service.py
deleted file mode 100644
index 1d8767c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_service.py
+++ /dev/null
@@ -1,45 +0,0 @@
-from resource_management import *
-
-def oozie_service(action = 'start'): # 'start' or 'stop'
-  import params
-
-  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
-    
-    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
-    else:
-      db_connection_check_command = None
-      
-    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
-    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
-      
-    if db_connection_check_command:
-      Execute( db_connection_check_command)
-                  
-    Execute( cmd1,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-      ignore_failures = True
-    ) 
-    
-    Execute( cmd2,
-      user = params.oozie_user,       
-      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
-    )
-    
-    Execute( start_cmd,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-    )
-  elif action == 'stop':
-    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
-    Execute( stop_cmd,
-      only_if  = no_op_test
-    )
-
-  
-  
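The start/stop logic above relies on not_if/only_if guards keyed to the pid file (no_op_test), so repeated start commands are no-ops while the daemon is alive. A rough stand-alone sketch of that guard using only the standard library; the pid-file path and start command are illustrative:

import os
import subprocess

PID_FILE = "/var/run/oozie/oozie.pid"   # illustrative path

def is_running(pid_file):
    # Equivalent of: ls pid_file && ps `cat pid_file`
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
        os.kill(pid, 0)                  # signal 0: existence check only
        return True
    except (IOError, ValueError, OSError):
        return False

def start(start_cmd):
    # Mirrors Execute(start_cmd, not_if=no_op_test): skip if already running.
    if not is_running(PID_FILE):
        subprocess.check_call(start_cmd, shell=True)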

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/params.py
deleted file mode 100644
index 0466ad8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/params.py
+++ /dev/null
@@ -1,64 +0,0 @@
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
-conf_dir = "/etc/oozie/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
-jdk_location = config['hostLevelParams']['jdk_location']
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hadoop_prefix = "/usr"
-oozie_tmp_dir = "/var/tmp/oozie"
-oozie_hdfs_user_dir = format("/user/{oozie_user}")
-oozie_pid_dir = status_params.oozie_pid_dir
-pid_file = status_params.pid_file
-hadoop_jar_location = "/usr/lib/hadoop/"
-# for HDP2 it's "/usr/share/HDP-oozie/ext-2.2.zip"
-ext_js_path = "/usr/share/HDP-oozie/ext.zip"
-oozie_libext_dir = "/usr/lib/oozie/libext"
-lzo_enabled = config['configurations']['global']['lzo_enabled']
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
-oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
-
-oracle_driver_jar_name = "ojdbc6.jar"
-java_share_dir = "/usr/share/java"
-
-java_home = config['hostLevelParams']['java_home']
-oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
-oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
-oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
-oozie_lib_dir = "/var/lib/oozie/"
-oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
-
-jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-if jdbc_driver_name == "com.mysql.jdbc.Driver":
-  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-else:
-  jdbc_driver_jar = ""
-  
-if lzo_enabled or jdbc_driver_name:
-  jar_option = "-jars"         
-else:
-  jar_option = ""
-  
-lzo_jar_suffix = "/usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar" if lzo_enabled else ""
-  
-if lzo_enabled and jdbc_driver_name:
-    jar_path = format("{lzo_jar_suffix}:{jdbc_driver_jar}")        
-else:
-    jar_path = format("{lzo_jar_suffix}{jdbc_driver_jar}")
\ No newline at end of file
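The driver selection and classpath assembly at the end of this file amount to a small lookup plus a join of the non-empty pieces. A stand-alone sketch of that logic; the jar locations are copied from the deleted file, everything else is illustrative:

# Sketch of the JDBC-driver -> connector-jar selection done in params.py.
JDBC_DRIVER_JARS = {
    "com.mysql.jdbc.Driver": "/usr/share/java/mysql-connector-java.jar",
    "oracle.jdbc.driver.OracleDriver": "/usr/share/java/ojdbc6.jar",
}

def build_jar_path(jdbc_driver_name, lzo_enabled):
    jdbc_jar = JDBC_DRIVER_JARS.get(jdbc_driver_name, "")
    lzo_jar = "/usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar" if lzo_enabled else ""
    # Join the non-empty entries into a ':'-separated classpath fragment.
    return ":".join(p for p in (lzo_jar, jdbc_jar) if p)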

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/service_check.py
deleted file mode 100644
index 7dbfc87..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/service_check.py
+++ /dev/null
@@ -1,47 +0,0 @@
-from resource_management import *
-
-class OozieServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    # on HDP2 this file is different
-    smoke_test_file_name = 'oozieSmoke.sh'
-
-    oozie_smoke_shell_file( smoke_test_file_name)
-  
-def oozie_smoke_shell_file(
-  file_name
-):
-  import params
-
-  File( format("/tmp/{file_name}"),
-    content = StaticFile(file_name),
-    mode = 0755
-  )
-  
-  if params.security_enabled:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
-  else:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
-
-  Execute( format("/tmp/{file_name}"),
-    command   = sh_cmd,
-    tries     = 3,
-    try_sleep = 5,
-    logoutput = True
-  )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServiceCheck().execute()
-  
-if __name__ == "__main__":
-  OozieServiceCheck().execute()
-  #main()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/status_params.py
deleted file mode 100644
index c44fcf4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
-pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-env.sh.j2
deleted file mode 100644
index 270a1a8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-env.sh.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-# export OOZIE_HTTP_PORT=11000
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-log4j.properties.j2
deleted file mode 100644
index e4a2662..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/templates/oozie-log4j.properties.j2
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/metainfo.xml
deleted file mode 100644
index 9fb2c06..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,61 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.11.1.1.3.3.0</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/params.py
deleted file mode 100644
index 86e962c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-# server configurations
-config = Script.get_config()
-
-pig_conf_dir = "/etc/pig/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr"
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig.py
deleted file mode 100644
index c2d7b02..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def pig():
-  import params
-
-  Directory( params.pig_conf_dir,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  pig_TemplateConfig( ['pig-env.sh','pig.properties','log4j.properties'])
-  
-  
-def pig_TemplateConfig(name):
-  import params
-  
-  if not isinstance(name, list):
-    name = [name]
-    
-  for x in name:
-    TemplateConfig( format("{pig_conf_dir}/{x}"),
-        owner = params.hdfs_user
-    )
-  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig_client.py
deleted file mode 100644
index acd0cb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/pig_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from pig import pig
-
-         
-class PigClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    pig()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/service_check.py
deleted file mode 100644
index 3cca087..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/scripts/service_check.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class PigServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    input_file = 'passwd'
-    output_file = "pigsmoke.out"
-  
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    #cleanup put below to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code is a function of the second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
-    test_cmd = format("fs -test -e {output_file}")
-  
-    ExecuteHadoop( create_file_cmd,
-      tries     = 3,
-      try_sleep = 5,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-  
-    File( '/tmp/pigSmoke.sh',
-      content = StaticFile("pigSmoke.sh"),
-      mode = 0755
-    )
-  
-    Execute( "pig /tmp/pigSmoke.sh",
-      tries     = 3,
-      try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      user      = params.smokeuser,
-      logoutput = True
-    )
-  
-    ExecuteHadoop( test_cmd,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigServiceCheck().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigServiceCheck().execute()
-  
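The smoke test above is a stage-run-verify sequence: put /etc/passwd into HDFS, run the pigSmoke.sh script, then test that the output directory exists. A rough stand-alone sketch of the same flow with subprocess, assuming the hadoop and pig commands are on PATH; the retry handling mirrors the tries/try_sleep arguments above:

import subprocess
import time

def run(cmd, tries=3, try_sleep=5):
    # Retry a shell command a few times before giving up.
    for _ in range(tries):
        if subprocess.call(cmd, shell=True) == 0:
            return True
        time.sleep(try_sleep)
    return False

if __name__ == "__main__":
    run("hadoop dfs -rmr pigsmoke.out passwd; hadoop dfs -put /etc/passwd passwd")
    run("pig /tmp/pigSmoke.sh")
    ok = run("hadoop fs -test -e pigsmoke.out", tries=1)
    print("Pig smoke test %s" % ("passed" if ok else "failed"))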

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/log4j.properties.j2
deleted file mode 100644
index 9ef6e2c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/log4j.properties.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set root logger level to DEBUG and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig-env.sh.j2
deleted file mode 100644
index b0e17d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig-env.sh.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig.properties.j2
deleted file mode 100644
index 6fcb233..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/PIG/package/templates/pig.properties.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Pig configuration file. All values can be overwritten by command line arguments.
-
-# log4jconf log4j configuration file
-# log4jconf=./conf/log4j.properties
-
-# a file that contains pig script
-#file=
-
-# load jarfile, colon separated
-#jar=
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-#verbose=true
-
-#exectype local|mapreduce, mapreduce is default
-#exectype=local
-
-#pig.logfile=
-
-#Do not spill temp files smaller than this size (bytes)
-#pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-#pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-#pig.exec.reducers.bytes.per.reducer=1000000000
-#pig.exec.reducers.max=999
-
-#Use this option only when your Pig job will otherwise die because of
-#using more counter than hadoop configured limit
-#pig.disable.counter=true
-hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/metainfo.xml
deleted file mode 100644
index 426bb25..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,77 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SQOOP</name>
-      <comment>Tool for transferring bulk data between Apache Hadoop and
-        structured data stores such as relational databases
-      </comment>
-      <version>1.4.3.1.3.3.0</version>
-
-      <components>
-        <component>
-          <name>SQOOP</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/sqoop_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>sqoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/__init__.py
deleted file mode 100644
index 3860581..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/__init__.py
+++ /dev/null
@@ -1,18 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/params.py
deleted file mode 100644
index 8f7eb21..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-security_enabled = config['configurations']['global']['security_enabled']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-sqoop_conf_dir = "/usr/lib/sqoop/conf"
-hbase_home = "/usr"
-hive_home = "/usr"
-zoo_conf_dir = "/etc/zookeeper"
-sqoop_lib = "/usr/lib/sqoop/lib"
-sqoop_user = "sqoop"
-
-keytab_path = config['configurations']['global']['keytab_path']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/service_check.py
deleted file mode 100644
index b872be6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/service_check.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-
-from resource_management import *
-
-
-class SqoopServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
-    Execute("sqoop version",
-            user = params.smokeuser,
-            logoutput = True
-    )
-
-if __name__ == "__main__":
-  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop.py
deleted file mode 100644
index 492550e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop.py
+++ /dev/null
@@ -1,51 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import sys
-
-def sqoop(type=None):
-  import params
-  Link(params.sqoop_lib + "/mysql-connector-java.jar",
-       to = '/usr/share/java/mysql-connector-java.jar'
-  )
-  Directory(params.sqoop_conf_dir,
-            owner = params.sqoop_user,
-            group = params.user_group
-  )
-  sqoop_TemplateConfig("sqoop-env.sh")
-  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
-          owner = params.sqoop_user,
-          group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  pass
-
-def sqoop_TemplateConfig(name, tag=None):
-  import params
-  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
-                  owner = params.sqoop_user,
-                  template_tag = tag
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop_client.py
deleted file mode 100644
index bd2863c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/scripts/sqoop_client.py
+++ /dev/null
@@ -1,40 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-from resource_management import *
-
-from sqoop import sqoop
-
-
-class SqoopClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    sqoop(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/templates/sqoop-env.sh.j2
deleted file mode 100644
index 90cbc75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/SQOOP/package/templates/sqoop-env.sh.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
-
-#Set the path for where the zookeeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 16d8691..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Timeout, in milliseconds, for commands executed by the Templeton API.</description>
-  </property>
-
-</configuration>
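
The properties above use the standard Hadoop configuration XML layout (a flat list of <property> elements with <name>, <value> and <description>), so the rendered file can be read back with nothing more than the Python standard library. A minimal sketch, assuming the file has been written to the /etc/hcatalog/conf directory used by the WebHCat scripts later in this change; the path and the property looked up are illustrative only:

    import xml.etree.ElementTree as ET

    def read_hadoop_site(path="/etc/hcatalog/conf/webhcat-site.xml"):
        # Collect every <property> into a simple name -> value dict.
        props = {}
        for prop in ET.parse(path).getroot().findall("property"):
            props[prop.findtext("name")] = prop.findtext("value") or ""
        return props

    # e.g. read_hadoop_site().get("templeton.port") should give "50111"
    # with the defaults shown above.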

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index d6c2a1f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,97 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <comment>REST interface for HCatalog (formerly Templeton)</comment>
-      <version>0.11.0.1.3.3.0</version>
-      <components>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>WEBHCAT/WEBHCAT_SERVER</co-locate>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
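
This schemaVersion 2.0 metainfo is what binds each component to the Python command script that drives it (here WEBHCAT_SERVER to scripts/webhcat_server.py), along with its category, cardinality and auto-deployed dependencies. A small sketch of extracting that mapping with ElementTree; it only illustrates the file layout and is not Ambari's own metainfo parser:

    import xml.etree.ElementTree as ET

    def component_scripts(metainfo_path):
        # Map component name -> (category, cardinality, command script).
        root = ET.parse(metainfo_path).getroot()
        mapping = {}
        for comp in root.findall("./services/service/components/component"):
            mapping[comp.findtext("name")] = (
                comp.findtext("category"),
                comp.findtext("cardinality"),
                comp.findtext("./commandScript/script"),
            )
        return mapping

    # For the WEBHCAT metainfo above this yields:
    #   {"WEBHCAT_SERVER": ("MASTER", "1", "scripts/webhcat_server.py")}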

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/files/templetonSmoke.sh
deleted file mode 100644
index cefc4f0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" > /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# The Pig smoke test is skipped on secure clusters
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
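
The part of this smoke test that actually runs is a single HTTP GET against the Templeton status endpoint on port 50111, expecting a 200 response (the remainder of the script sits after the early exit 0 and also relies on an undefined ${destdir}). A rough Python equivalent of that status probe, with the kinit and su-to-smoke-user handling deliberately left out; host and timeout are assumptions:

    try:
        from urllib.request import urlopen   # Python 3
    except ImportError:
        from urllib2 import urlopen          # Python 2

    def templeton_status_ok(host, port=50111, timeout=30):
        # Mirrors: curl http://<host>:50111/templeton/v1/status and a check
        # that the HTTP status code is 200.
        url = "http://%s:%d/templeton/v1/status" % (host, port)
        try:
            return urlopen(url, timeout=timeout).getcode() == 200
        except Exception:
            return False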

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/params.py
deleted file mode 100644
index 60b52a7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/params.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-webhcat_user = config['configurations']['global']['webhcat_user']
-download_url = config['configurations']['global']['apache_artifacts_download_url']
-
-config_dir = '/etc/hcatalog/conf'
-
-templeton_log_dir = config['configurations']['global']['hcat_log_dir']
-templeton_pid_dir = status_params.templeton_pid_dir
-
-pid_file = status_params.pid_file
-
-hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-hadoop_home = '/usr'
-user_group = config['configurations']['global']['user_group']
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
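
params.py centralises every value the other WebHCat scripts import, pulling most of them out of the command's configuration dictionaries. The last line resolves the kinit binary by probing an explicitly configured path first and then a list of conventional locations; a standalone sketch of that kind of lookup is below (it mimics the idea, it is not the resource_management helper itself):

    import os

    def find_kinit(candidates=(None, "/usr/bin", "/usr/kerberos/bin", "/usr/sbin")):
        # A None entry stands in for an unset 'kinit_path_local' configuration
        # value; the first directory containing an executable kinit wins.
        for directory in candidates:
            if not directory:
                continue
            path = os.path.join(directory, "kinit")
            if os.path.isfile(path) and os.access(path, os.X_OK):
                return path
        return "kinit"   # fall back to whatever is on PATH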

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/service_check.py
deleted file mode 100644
index 58b4d25..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/service_check.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-class WebHCatServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File('/tmp/templetonSmoke.sh',
-         content= StaticFile('templetonSmoke.sh'),
-         mode=0755
-    )
-
-    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-                 " {security_enabled} {kinit_path_local}",
-                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True)
-
-if __name__ == "__main__":
-  WebHCatServiceCheck().execute()
\ No newline at end of file
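
One detail worth noticing above: the command template names {smokeuser_keytab}, and the trailing keyword argument overrides it with the literal string "no_keytab" whenever security is disabled, so templetonSmoke.sh always receives five positional arguments. A plain str.format illustration with made-up values (the host name, user and keytab path below are not taken from this change):

    template = ("sh /tmp/templetonSmoke.sh {host} {smokeuser} {keytab} "
                "{security_enabled} {kinit_path}")

    # Secure cluster: the real keytab path is passed through.
    print(template.format(host="master-1.example.com", smokeuser="ambari-qa",
                          keytab="/etc/security/keytabs/smokeuser.headless.keytab",
                          security_enabled="true", kinit_path="/usr/bin/kinit"))

    # Non-secure cluster: the placeholder "no_keytab" takes its place.
    print(template.format(host="master-1.example.com", smokeuser="ambari-qa",
                          keytab="no_keytab",
                          security_enabled="false", kinit_path="/usr/bin/kinit"))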

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/status_params.py
deleted file mode 100644
index 21dde6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
-pid_file = format('{templeton_pid_dir}/webhcat.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat.py
deleted file mode 100644
index ae12f54..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import sys
-
-
-def webhcat():
-  import params
-
-  Directory(params.templeton_pid_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.templeton_log_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.config_dir,
-            owner=params.webhcat_user,
-            group=params.user_group)
-
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['webhcat-site'],
-            owner=params.webhcat_user,
-            group=params.user_group,
-  )
-
-  File(format("{config_dir}/webhcat-env.sh"),
-       owner=params.webhcat_user,
-       group=params.user_group,
-       content=Template('webhcat-env.sh.j2')
-  )
-
-  if params.security_enabled:
-    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-  else:
-    kinit_if_needed = ""
-
-  if kinit_if_needed:
-    Execute(kinit_if_needed,
-            user=params.webhcat_user,
-            path='/bin'
-    )
-
-  copyFromLocal(path='/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
-                kinnit_if_needed=kinit_if_needed
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
-  )
-
-
-def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinnit_if_needed=""):
-  import params
-
-  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
-  unless_cmd = format("{kinnit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
-
-  ExecuteHadoop(copy_cmd,
-                not_if=unless_cmd,
-                user=owner,
-                conf_dir=params.hadoop_conf_dir)
-
-  if not owner:
-    chown = None
-  else:
-    if not group:
-      chown = owner
-    else:
-      chown = format('{owner}:{group}')
-
-  # Apply ownership and permissions only when they were actually supplied.
-  if chown:
-    chown_cmd = format("fs -chown {chown} {dest_dir}")
-
-    ExecuteHadoop(chown_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
-
-  if mode:
-    chmod_cmd = format('fs -chmod {mode} {dest_dir}')
-
-    ExecuteHadoop(chmod_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
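
copyFromLocal() above is an idempotent upload: the ExecuteHadoop copy is guarded by a not_if that checks whether the destination already exists in HDFS, and ownership/permissions are adjusted afterwards. A standalone sketch of the same copy-unless-present idea using plain subprocess calls, assuming only that the hadoop client is on PATH (the stack itself goes through ExecuteHadoop and the configured conf dir):

    import os
    import subprocess

    def hdfs_copy_if_absent(local_path, dest_path):
        # The 'hadoop fs -ls' probe plays the role of the not_if guard above.
        with open(os.devnull, "w") as devnull:
            exists = subprocess.call(["hadoop", "fs", "-ls", dest_path],
                                     stdout=devnull, stderr=devnull) == 0
        if not exists:
            subprocess.check_call(["hadoop", "fs", "-copyFromLocal",
                                   local_path, dest_path])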

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_server.py
deleted file mode 100644
index 4365111..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-
-class WebHCatServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    webhcat()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    webhcat_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    webhcat_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-
-if __name__ == "__main__":
-  WebHCatServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_service.py
deleted file mode 100644
index 12c3854..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-def webhcat_service(action='start'):
-  import params
-
-  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
-
-  if action == 'start':
-    demon_cmd = format('{cmd} start')
-    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
-    Execute(demon_cmd,
-            user=params.webhcat_user,
-            not_if=no_op_test
-    )
-  elif action == 'stop':
-    demon_cmd = format('{cmd} stop')
-    Execute(demon_cmd,
-            user=params.webhcat_user
-    )
-    Execute(format('rm -f {pid_file}'))
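
The start branch above is made idempotent with a shell not_if guard: the daemon is only launched if the pid file is missing or the recorded process is gone. A small Python sketch of the same liveness check, assuming a Linux /proc filesystem; the stack itself expresses it as the one-line ls/ps test shown above:

    import os

    def process_alive(pid_file):
        # True only when the pid file exists and its pid is still running.
        if not os.path.isfile(pid_file):
            return False
        try:
            with open(pid_file) as handle:
                pid = int(handle.read().strip())
        except (IOError, ValueError):
            return False
        return os.path.isdir("/proc/%d" % pid)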

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/templates/webhcat-env.sh.j2
deleted file mode 100644
index 9ea4a79..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/WEBHCAT/package/templates/webhcat-env.sh.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The file containing the running pid
-PID_FILE={{pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop
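
The .j2 suffix marks this as a Jinja2 template: the {{pid_file}} and {{templeton_log_dir}} placeholders are filled from the params module when webhcat.py renders Template('webhcat-env.sh.j2'). A minimal rendering sketch with the jinja2 package; the two values are placeholders, the real ones come from hcat_pid_dir and hcat_log_dir in the global configuration:

    from jinja2 import Template

    snippet = Template(
        "PID_FILE={{pid_file}}\n"
        "TEMPLETON_LOG_DIR={{templeton_log_dir}}/\n"
    )
    print(snippet.render(pid_file="/var/run/webhcat/webhcat.pid",
                         templeton_log_dir="/var/log/webhcat"))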

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>
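
Most of the values above map one-to-one onto the keys ZooKeeper itself expects in zoo.cfg (tickTime, initLimit, syncLimit, clientPort and the data directory). The sketch below shows that mapping with the defaults from this file; it is only an illustration of how such globals are typically consumed, not the stack's actual template:

    def render_zoo_cfg(cfg):
        # Translate the global.xml values into standard zoo.cfg lines.
        keys = [("tickTime", "tickTime"), ("initLimit", "initLimit"),
                ("syncLimit", "syncLimit"), ("clientPort", "clientPort"),
                ("zk_data_dir", "dataDir")]
        return "\n".join("%s=%s" % (zoo_key, cfg[name]) for name, zoo_key in keys)

    print(render_zoo_cfg({"tickTime": 2000, "initLimit": 10, "syncLimit": 5,
                          "clientPort": 2181, "zk_data_dir": "/hadoop/zookeeper"}))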


http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmond.sh
deleted file mode 100644
index 1af3eb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopRrdcached.sh
deleted file mode 100644
index 0a0d8d8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/teardownGanglia.sh
deleted file mode 100644
index b27f7a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia.py
deleted file mode 100644
index 1eae6d0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia.py
+++ /dev/null
@@ -1,106 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import os
-
-
-def groups_and_users():
-  import params
-
-  Group(params.user_group)
-  Group(params.gmetad_user)
-  Group(params.gmond_user)
-  User(params.gmond_user,
-       groups=[params.gmond_user])
-  User(params.gmetad_user,
-       groups=[params.gmetad_user])
-
-
-def config():
-  import params
-
-  shell_cmds_dir = params.ganglia_shell_cmds_dir
-  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
-                 'gmondLib.sh', 'rrdcachedLib.sh',
-                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
-                 'startRrdcached.sh', 'stopGmetad.sh',
-                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
-  Directory(shell_cmds_dir,
-            owner="root",
-            group="root",
-            recursive=True
-  )
-  init_file("gmetad")
-  init_file("gmond")
-  for sh_file in shell_files:
-    shell_file(sh_file)
-  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
-    ganglia_TemplateConfig(conf_file)
-
-
-def init_file(name):
-  import params
-
-  File("/etc/init.d/hdp-" + name,
-       content=StaticFile(name + ".init"),
-       mode=0755
-  )
-
-
-def shell_file(name):
-  import params
-
-  File(params.ganglia_shell_cmds_dir + os.sep + name,
-       content=StaticFile(name),
-       mode=0755
-  )
-
-
-def ganglia_TemplateConfig(name, mode=0755, tag=None):
-  import params
-
-  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
-                 owner="root",
-                 group="root",
-                 template_tag=tag,
-                 mode=mode
-  )
-
-
-def generate_daemon(ganglia_service,
-                    name=None,
-                    role=None,
-                    owner=None,
-                    group=None):
-  import params
-
-  cmd = ""
-  if ganglia_service == "gmond":
-    if role == "server":
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
-    else:
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
-  elif ganglia_service == "gmetad":
-    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
-  else:
-    raise Fail("Unexpected ganglia service")
-  Execute(format(cmd),
-          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
-                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
-  )
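
generate_daemon() boils down to building one setupGanglia.sh invocation per daemon: -c names the cluster, -m marks a gmond run in the server role, -t selects gmetad, and -o/-g set the owner and group. A tiny illustration of the resulting command lines; the group name is an assumption (the real value comes from user_group in params):

    SHELL_CMDS_DIR = "/usr/libexec/hdp/ganglia"

    def setup_command(service, name=None, role=None, owner="root", group="hadoop"):
        # Mirror the branching in generate_daemon() above.
        if service == "gmond":
            master_flag = "-m " if role == "server" else ""
            flags = "-c %s %s-o %s -g %s" % (name, master_flag, owner, group)
        elif service == "gmetad":
            flags = "-t -o %s -g %s" % (owner, group)
        else:
            raise ValueError("Unexpected ganglia service: %s" % service)
        return "%s/setupGanglia.sh %s" % (SHELL_CMDS_DIR, flags)

    # setup_command("gmond", name="HDPNameNode", role="server") ->
    #   /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop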

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor.py
deleted file mode 100644
index bddecf6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ /dev/null
@@ -1,163 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_monitor_service
-
-
-class GangliaMonitor(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    ganglia_monitor_service.monitor("start")
-
-  def stop(self, env):
-    ganglia_monitor_service.monitor("stop")
-
-
-  def status(self, env):
-    import status_params
-    pid_file_name = 'gmond.pid'
-    pid_file_count = 0
-    pid_dir = status_params.pid_dir
-    # Recursively check all existing gmond pid files
-    for cur_dir, subdirs, files in os.walk(pid_dir):
-      for file_name in files:
-        if file_name == pid_file_name:
-          pid_file = os.path.join(cur_dir, file_name)
-          check_process_status(pid_file)
-          pid_file_count += 1
-    if pid_file_count == 0: # If no pid file is present
-      raise ComponentIsNotRunning()
-
-
-  def config(self, env):
-    import params
-
-    ganglia.groups_and_users()
-
-    Directory(params.ganglia_conf_dir,
-              owner="root",
-              group=params.user_group,
-              recursive=True
-    )
-
-    ganglia.config()
-
-    if params.is_namenode_master:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_jtnode_master:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_rmnode_master:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hsnode_master:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hbase_master:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_slave:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-
-    Directory(path.join(params.ganglia_dir, "conf.d"),
-              owner="root",
-              group=params.user_group
-    )
-
-    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "gmond.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-if __name__ == "__main__":
-  GangliaMonitor().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor_service.py
deleted file mode 100644
index d86d894..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_monitor_service.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def monitor(action=None):# 'start' or 'stop'
-  if action == "start":
-    Execute("chkconfig gmond off",
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    )
-  Execute(
-    format(
-      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
-    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server.py
deleted file mode 100644
index e391562..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server.py
+++ /dev/null
@@ -1,181 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_server_service
-
-
-class GangliaServer(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/gmetad.pid")
-    # Recursively check all existing gmetad pid files
-    check_process_status(pid_file)
-
-  def config(self, env):
-    import params
-
-    ganglia.groups_and_users()
-    ganglia.config()
-
-    if params.has_namenodes:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_jobtracker:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_masters:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_resourcemanager:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-    if params.has_historyserver:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_slaves:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-    generate_daemon("gmetad",
-                    name = "gmetad",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-    change_permission()
-    server_files()
-    File(path.join(params.ganglia_dir, "gmetad.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-def change_permission():
-  import params
-
-  Directory('/var/lib/ganglia/dwoo',
-            mode=0777,
-            owner=params.gmetad_user,
-            recursive=True
-  )
-
-
-def server_files():
-  import params
-
-  rrd_py_path = params.rrd_py_path
-  Directory(rrd_py_path,
-            recursive=True
-  )
-  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
-  File(rrd_py_file_path,
-       content=StaticFile("rrd.py"),
-       mode=0755
-  )
-  rrd_file_owner = params.gmetad_user
-  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
-    Directory(params.rrdcached_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              mode=0755,
-              recursive=True
-    )
-    Directory(params.rrdcached_default_base_dir,
-              action = "delete"
-    )
-    Link(params.rrdcached_default_base_dir,
-         to=params.rrdcached_base_dir
-    )
-  elif rrd_file_owner != 'nobody':
-    Directory(params.rrdcached_default_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              recursive=True
-    )
-
-
-if __name__ == "__main__":
-  GangliaServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server_service.py
deleted file mode 100644
index b93e3f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/ganglia_server_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def server(action=None):# 'start' or 'stop'
-  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  Execute(format(command),
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/params.py
deleted file mode 100644
index 3700d0a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/params.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-
-config = Script.get_config()
-
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = config['configurations']['global']["ganglia_conf_dir"]
-ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
-ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
-
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-webserver_group = "apache"
-rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-
-ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
-
-hostname = config["hostname"]
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-rm_host = default("/clusterHostInfo/rm_host", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-# datanodes are marked as slave_hosts
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
-hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
-flume_hosts = default("/clusterHostInfo/flume_hosts", [])
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-is_tasktracker = hostname in tt_hosts
-is_hbase_rs = hostname in hbase_rs_hosts
-is_flume = hostname in flume_hosts
-
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_tasktracker = not len(tt_hosts) == 0
-has_hbase_rs = not len(hbase_rs_hosts) == 0
-has_flume = not len(flume_hosts) == 0
-
-if System.get_instance().platform == "suse":
-  rrd_py_path = '/srv/www/cgi-bin'
-else:
-  rrd_py_path = '/var/www/cgi-bin'
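
Note: the is_*/has_* flags above are plain membership and non-empty checks against the clusterHostInfo lists. A self-contained sketch with made-up host data (illustrative only):

    cluster_host_info = {"namenode_host": ["c6401.ambari.apache.org"], "flume_hosts": []}
    hostname = "c6401.ambari.apache.org"

    is_namenode_master = hostname in cluster_host_info.get("namenode_host", [])
    has_flume = len(cluster_host_info.get("flume_hosts", [])) > 0
    print(is_namenode_master, has_flume)   # True False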

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/status_params.py
deleted file mode 100644
index 3ccad2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaClusters.conf.j2
deleted file mode 100644
index 23588a5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaClusters.conf.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-    HDPJournalNode        {{ganglia_server_host}}   8654
-    HDPFlumeServer        {{ganglia_server_host}}   8655
-    HDPHBaseRegionServer  {{ganglia_server_host}}   8656
-    HDPNodeManager        {{ganglia_server_host}}   8657
-    HDPTaskTracker        {{ganglia_server_host}}   8658
-    HDPDataNode           {{ganglia_server_host}}   8659
-    HDPSlaves             {{ganglia_server_host}}   8660
-    HDPNameNode           {{ganglia_server_host}}   8661
-    HDPJobTracker         {{ganglia_server_host}}   8662
-    HDPHBaseMaster        {{ganglia_server_host}}   8663
-    HDPResourceManager    {{ganglia_server_host}}   8664
-    HDPHistoryServer      {{ganglia_server_host}}   8666

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaEnv.sh.j2
deleted file mode 100644
index 1ead550..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaEnv.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER={{gmetad_user}};
-GMOND_USER={{gmond_user}};
-WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaLib.sh.j2
deleted file mode 100644
index 4b5bdd1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/GANGLIA/package/templates/gangliaLib.sh.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR={{ganglia_conf_dir}};
-GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
-RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/global.xml
deleted file mode 100644
index 453184b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>PID Directory for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>HBase RegionServer Heap Size.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.25</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>10737418240</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-    <property>
-    <name>regionserver_handlers</name>
-    <value>30</value>
-    <description>HBase RegionServer Handler</description>
-  </property>
-    <property>
-    <name>hregion_majorcompaction</name>
-    <value>86400000</value>
-    <description>HBase Major Compaction.</description>
-  </property>
-    <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-    <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>HBase Client Scanner Caching</description>
-  </property>
-    <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>60000</value>
-    <description>ZooKeeper Session Timeout</description>
-  </property>
-    <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-   <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read</description>
-  </property>
-   <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support</description>
-  </property>
-   <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>7</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Region Server memstore.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.35</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>HBase conf dir.</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (i.e.
-    clients talking to HRegionServers).
-    The ACL is a comma-separated list of user and group names, with the user and
-    group lists separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (i.e.
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names, with the user and
-    group lists separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster).
-    The ACL is a comma-separated list of user and group names, with the user and
-    group lists separated by a blank, e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index 68904a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,367 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      Setting this value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
-    times hbase.hregion.flush.size bytes.  Useful for preventing
-    runaway memstores during spikes in update traffic.  Without an
-    upper bound, the memstore fills such that when it flushes, the
-    resultant flush files take a long time to compact or split, or,
-    worse, we OOME.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>60000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since they cannot be split it helps avoiding that a region
-    cannot be split any further because the data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-  </property>
-
-  <property>
-    <name>hbase.rpc.engine</name>
-    <value>org.apache.hadoop.hbase.ipc.WritableRpcEngine</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>Does HDFS allow appends to files?
-    This is an hdfs config. set in here so the hdfs client will do append support.
-    You must ensure that this config. is true serverside too when running hbase
-    (You will have to restart your cluster after setting it).
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>Enable/Disable short circuit read for your client.
-    Hadoop servers should be configured to allow short circuit read
-    for the hbase user for this to take effect
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-  
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-
-</configuration>
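
Note: several of the limits above are fractions of the RegionServer heap or multiples of the flush size. A quick back-of-the-envelope check using the values shown and a hypothetical 1 GB heap:

    heap_mb = 1024                              # hypothetical RegionServer -Xmx
    upper_mb = 0.4 * heap_mb                    # hbase.regionserver.global.memstore.upperLimit
    lower_mb = 0.38 * heap_mb                   # hbase.regionserver.global.memstore.lowerLimit
    flush_mb = 134217728 / (1024 * 1024)        # hbase.hregion.memstore.flush.size (128 MB)
    block_at_mb = 2 * flush_mb                  # hbase.hregion.memstore.block.multiplier
    print(upper_mb, lower_mb, flush_mb, block_at_mb)   # 409.6 389.12 128.0 256.0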

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
deleted file mode 100644
index 4c610db..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,123 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <comment>Non-relational distributed database and centralized service for configuration management &amp;
-        synchronization
-      </comment>
-      <version>0.94.6.1.3.3.0</version>
-      <components>
-        <component>
-          <name>HBASE_MASTER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
-              <scope>cluster</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-                <co-locate>HBASE/HBASE_MASTER</co-locate>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <category>SLAVE</category>
-          <cardinality>1+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_regionserver.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <commandScript>
-            <script>scripts/hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hbase</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 39fe6e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/functions.py
deleted file mode 100644
index 80b49e6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/functions.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import math
-import datetime
-
-from resource_management.core.shell import checked_call
-
-def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
-  """
-  @param heapsize_str: str (e.g '1000m')
-  @param xmn_percent: float (e.g 0.2)
-  @param xmn_max: integer (e.g 512)
-  """
-  heapsize = int(re.search('\d+',heapsize_str).group(0))
-  heapsize_unit = re.search('\D+',heapsize_str).group(0)
-  xmn_val = int(math.floor(heapsize*xmn_percent))
-  xmn_val -= xmn_val % 8
-  
-  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
-  return str(result_xmn_val) + heapsize_unit
-
-def get_unique_id_and_date():
-    code, out = checked_call("hostid")
-    id = out.strip()
-    
-    now = datetime.datetime.now()
-    date = now.strftime("%M%d%y")
-
-    return "id{id}_date{date}".format(id=id, date=date)
-  
-def get_kinit_path(pathes_list):
-  """
-  @param pathes_list: list of directories to search for the kinit binary
-  """
-  kinit_path = ""
-  
-  for x in pathes_list:
-    if not x:
-      continue
-    
-    path = os.path.join(x,"kinit")
-
-    if os.path.isfile(path):
-      kinit_path = path
-      break
-    
-  return kinit_path
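
Note: the Xmn calculation above rounds the requested fraction of the heap down to a multiple of 8 and caps it at xmn_max. A quick worked example, assuming the module above is importable as functions:

    from functions import calc_xmn_from_xms   # the helper defined above

    # floor(1024 * 0.2) = 204; 204 - (204 % 8) = 200; min(200, 512) = 200
    print(calc_xmn_from_xms("1024m", 0.2, 512))   # "200m"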

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase.py
deleted file mode 100644
index bd33463..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-def hbase(type=None # 'master' or 'regionserver' or 'client'
-              ):
-  import params
-  
-  Directory( params.conf_dir,
-      owner = params.hbase_user,
-      group = params.user_group,
-      recursive = True
-  )
-  
-  XmlConfig( "hbase-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hbase-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  XmlConfig( "hdfs-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hdfs-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-  
-  if 'hbase-policy' in params.config['configurations']:
-    XmlConfig( "hbase-policy.xml",
-      configurations = params.config['configurations']['hbase-policy'],
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  # Manually overriding ownership of file installed by hadoop package
-  else: 
-    File( format("{conf_dir}/hbase-policy.xml"),
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  
-  hbase_TemplateConfig( 'hbase-env.sh')     
-       
-  hbase_TemplateConfig( params.metric_prop_file_name,
-    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
-  )
-
-  hbase_TemplateConfig( 'regionservers')
-
-  if params.security_enabled:
-    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
-  
-  if type != "client":
-    Directory( params.pid_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-  
-    Directory ( [params.tmp_dir, params.log_dir],
-      owner = params.hbase_user,
-      recursive = True
-    )    
-
-def hbase_TemplateConfig(name, 
-                         tag=None
-                         ):
-  import params
-
-  TemplateConfig( format("{conf_dir}/{name}"),
-      owner = params.hbase_user,
-      template_tag = tag
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_client.py
deleted file mode 100644
index 0f2a1bc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-
-         
-class HbaseClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    
-    hbase(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/'
-  stdoutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
-  
-  HbaseClient().execute()
-  
-if __name__ == "__main__":
-  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_master.py
deleted file mode 100644
index d94b4b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-         
-class HbaseMaster(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='master')
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'master',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'master',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
-    check_process_status(pid_file)
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
-  stroutputf = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
-  
-  HbaseMaster().execute()
-  
-if __name__ == "__main__":
-  HbaseMaster().execute()
-  #main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_regionserver.py
deleted file mode 100644
index 2d91e75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-         
-class HbaseRegionServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='regionserver')
-      
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'regionserver',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'regionserver',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
-    check_process_status(pid_file)
-    
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseRegionServer().execute()
-  
-if __name__ == "__main__":
-  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_service.py
deleted file mode 100644
index 7a1248b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/HBASE/package/scripts/hbase_service.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
-    import params
-  
-    role = name
-    cmd = format("{daemon_script} --config {conf_dir}")
-    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
-    
-    daemon_cmd = None
-    no_op_test = None
-    
-    if action == 'start':
-      daemon_cmd = format("{cmd} start {role}")
-      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    elif action == 'stop':
-      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
-
-    if daemon_cmd is not None:
-      Execute ( daemon_cmd,
-        not_if = no_op_test,
-        user = params.hbase_user
-      )
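
[The deleted helper above wraps a single Execute resource whose not_if guard makes start idempotent: the daemon is launched only if its pid file is missing or the recorded process is dead, and stop removes the pid file after shutting the role down. A rough standalone illustration of that same guard using plain subprocess calls follows; the daemon, config and pid locations are assumptions for the example, not values from the stack's params module.]

#!/usr/bin/env python
# Standalone sketch of the conditional start/stop logic encoded by the
# Execute resource above; not the resource_management API. Paths are assumed.
import os
import subprocess

DAEMON_SCRIPT = "/usr/lib/hbase/bin/hbase-daemon.sh"  # assumed location
CONF_DIR = "/etc/hbase/conf"                          # assumed location
PID_DIR = "/var/run/hbase"                            # assumed location

def hbase_daemon(role, action):
  pid_file = "%s/hbase-hbase-%s.pid" % (PID_DIR, role)
  if action == "start":
    # Equivalent of the not_if guard: skip the start when the pid file exists
    # and the recorded process is still alive.
    running = subprocess.call(
      "ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1" % (pid_file, pid_file),
      shell=True) == 0
    if not running:
      subprocess.check_call("%s --config %s start %s" % (DAEMON_SCRIPT, CONF_DIR, role), shell=True)
  elif action == "stop":
    subprocess.check_call("%s --config %s stop %s" % (DAEMON_SCRIPT, CONF_DIR, role), shell=True)
    if os.path.exists(pid_file):
      os.remove(pid_file)

if __name__ == "__main__":
  hbase_daemon("master", "start")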


[12/12] git commit: AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/92583535
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/92583535
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/92583535

Branch: refs/heads/trunk
Commit: 92583535dc8ad24c49f9d7f4c6c9c8b56575c497
Parents: 186d6a7
Author: Mahadev Konar <ma...@apache.org>
Authored: Fri Jan 17 11:31:56 2014 -0800
Committer: Mahadev Konar <ma...@apache.org>
Committed: Fri Jan 17 11:31:56 2014 -0800

----------------------------------------------------------------------
 .../HDP/1.3.3/services/FLUME/metainfo.xml       |    1 +
 .../HDP/1.3.3/services/GANGLIA/metainfo.xml     |  102 +-
 .../HDP/1.3.3/services/HBASE/metainfo.xml       |  113 +-
 .../services/HCATALOG/configuration/global.xml  |   45 -
 .../HDP/1.3.3/services/HCATALOG/metainfo.xml    |   30 -
 .../services/HDFS/configuration/hdfs-site.xml   |  372 ++---
 .../stacks/HDP/1.3.3/services/HDFS/metainfo.xml |  134 +-
 .../services/HIVE/configuration/global.xml      |   23 +
 .../services/HIVE/configuration/hive-site.xml   |    8 +-
 .../stacks/HDP/1.3.3/services/HIVE/metainfo.xml |  175 ++-
 .../stacks/HDP/1.3.3/services/HUE/metainfo.xml  |    1 +
 .../MAPREDUCE/configuration/mapred-site.xml     |  552 ++++---
 .../HDP/1.3.3/services/MAPREDUCE/metainfo.xml   |   92 +-
 .../HDP/1.3.3/services/NAGIOS/metainfo.xml      |   94 +-
 .../HDP/1.3.3/services/OOZIE/metainfo.xml       |  103 +-
 .../stacks/HDP/1.3.3/services/PIG/metainfo.xml  |   47 +-
 .../HDP/1.3.3/services/SQOOP/metainfo.xml       |   63 +-
 .../HDP/1.3.3/services/WEBHCAT/metainfo.xml     |   91 +-
 .../HDP/1.3.3/services/ZOOKEEPER/metainfo.xml   |   57 +-
 .../before-INSTALL/files/changeToSecureUid.sh   |   50 -
 .../1.3.4/hooks/before-INSTALL/scripts/hook.py  |   36 -
 .../hooks/before-INSTALL/scripts/params.py      |   81 --
 .../scripts/shared_initialization.py            |  107 --
 .../hooks/before-START/files/checkForFormat.sh  |   62 -
 .../before-START/files/task-log4j.properties    |  132 --
 .../1.3.4/hooks/before-START/scripts/hook.py    |   37 -
 .../1.3.4/hooks/before-START/scripts/params.py  |  172 ---
 .../scripts/shared_initialization.py            |  322 -----
 .../templates/commons-logging.properties.j2     |   25 -
 .../templates/exclude_hosts_list.j2             |    3 -
 .../before-START/templates/hadoop-env.sh.j2     |  121 --
 .../templates/hadoop-metrics2.properties.j2     |   45 -
 .../hooks/before-START/templates/hdfs.conf.j2   |   17 -
 .../before-START/templates/health_check-v2.j2   |   91 --
 .../before-START/templates/health_check.j2      |  118 --
 .../templates/include_hosts_list.j2             |    3 -
 .../before-START/templates/log4j.properties.j2  |  200 ---
 .../hooks/before-START/templates/slaves.j2      |    3 -
 .../hooks/before-START/templates/snmpd.conf.j2  |   48 -
 .../templates/taskcontroller.cfg.j2             |   20 -
 .../resources/stacks/HDP/1.3.4/metainfo.xml     |   22 -
 .../stacks/HDP/1.3.4/repos/repoinfo.xml         |   75 -
 .../services/FLUME/configuration/global.xml     |   24 -
 .../HDP/1.3.4/services/FLUME/metainfo.xml       |   31 -
 .../services/GANGLIA/configuration/global.xml   |   55 -
 .../HDP/1.3.4/services/GANGLIA/metainfo.xml     |  106 --
 .../GANGLIA/package/files/checkGmetad.sh        |   37 -
 .../GANGLIA/package/files/checkGmond.sh         |   62 -
 .../GANGLIA/package/files/checkRrdcached.sh     |   34 -
 .../services/GANGLIA/package/files/gmetad.init  |   73 -
 .../services/GANGLIA/package/files/gmetadLib.sh |  204 ---
 .../services/GANGLIA/package/files/gmond.init   |   73 -
 .../services/GANGLIA/package/files/gmondLib.sh  |  545 -------
 .../1.3.4/services/GANGLIA/package/files/rrd.py |  213 ---
 .../GANGLIA/package/files/rrdcachedLib.sh       |   47 -
 .../GANGLIA/package/files/setupGanglia.sh       |  141 --
 .../GANGLIA/package/files/startGmetad.sh        |   64 -
 .../GANGLIA/package/files/startGmond.sh         |   80 --
 .../GANGLIA/package/files/startRrdcached.sh     |   69 -
 .../GANGLIA/package/files/stopGmetad.sh         |   43 -
 .../services/GANGLIA/package/files/stopGmond.sh |   54 -
 .../GANGLIA/package/files/stopRrdcached.sh      |   41 -
 .../GANGLIA/package/files/teardownGanglia.sh    |   28 -
 .../services/GANGLIA/package/scripts/ganglia.py |  106 --
 .../GANGLIA/package/scripts/ganglia_monitor.py  |  163 ---
 .../package/scripts/ganglia_monitor_service.py  |   31 -
 .../GANGLIA/package/scripts/ganglia_server.py   |  181 ---
 .../package/scripts/ganglia_server_service.py   |   27 -
 .../services/GANGLIA/package/scripts/params.py  |   74 -
 .../GANGLIA/package/scripts/status_params.py    |   25 -
 .../package/templates/gangliaClusters.conf.j2   |   34 -
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |   24 -
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |   62 -
 .../services/HBASE/configuration/global.xml     |  160 ---
 .../HBASE/configuration/hbase-policy.xml        |   53 -
 .../services/HBASE/configuration/hbase-site.xml |  367 -----
 .../HDP/1.3.4/services/HBASE/metainfo.xml       |  123 --
 .../HBASE/package/files/hbaseSmokeVerify.sh     |   32 -
 .../services/HBASE/package/scripts/__init__.py  |   19 -
 .../services/HBASE/package/scripts/functions.py |   67 -
 .../services/HBASE/package/scripts/hbase.py     |   91 --
 .../HBASE/package/scripts/hbase_client.py       |   52 -
 .../HBASE/package/scripts/hbase_master.py       |   74 -
 .../HBASE/package/scripts/hbase_regionserver.py |   75 -
 .../HBASE/package/scripts/hbase_service.py      |   46 -
 .../services/HBASE/package/scripts/params.py    |   84 --
 .../HBASE/package/scripts/service_check.py      |   89 --
 .../HBASE/package/scripts/status_params.py      |   25 -
 .../hadoop-metrics.properties-GANGLIA-MASTER.j2 |   50 -
 .../hadoop-metrics.properties-GANGLIA-RS.j2     |   50 -
 .../templates/hadoop-metrics.properties.j2      |   50 -
 .../HBASE/package/templates/hbase-env.sh.j2     |   82 --
 .../HBASE/package/templates/hbase-smoke.sh.j2   |   26 -
 .../package/templates/hbase_client_jaas.conf.j2 |   23 -
 .../templates/hbase_grant_permissions.j2        |   21 -
 .../package/templates/hbase_master_jaas.conf.j2 |   25 -
 .../templates/hbase_regionserver_jaas.conf.j2   |   25 -
 .../HBASE/package/templates/regionservers.j2    |    2 -
 .../services/HDFS/configuration/core-site.xml   |  253 ----
 .../services/HDFS/configuration/global.xml      |  187 ---
 .../HDFS/configuration/hadoop-policy.xml        |  134 --
 .../services/HDFS/configuration/hdfs-site.xml   |  476 ------
 .../stacks/HDP/1.3.4/services/HDFS/metainfo.xml |  146 --
 .../HDFS/package/files/checkForFormat.sh        |   62 -
 .../services/HDFS/package/files/checkWebUI.py   |   53 -
 .../services/HDFS/package/scripts/datanode.py   |   57 -
 .../HDFS/package/scripts/hdfs_client.py         |   52 -
 .../HDFS/package/scripts/hdfs_datanode.py       |   59 -
 .../HDFS/package/scripts/hdfs_namenode.py       |  192 ---
 .../HDFS/package/scripts/hdfs_snamenode.py      |   53 -
 .../services/HDFS/package/scripts/namenode.py   |   66 -
 .../services/HDFS/package/scripts/params.py     |  165 ---
 .../HDFS/package/scripts/service_check.py       |  106 --
 .../services/HDFS/package/scripts/snamenode.py  |   64 -
 .../HDFS/package/scripts/status_params.py       |   31 -
 .../services/HDFS/package/scripts/utils.py      |  133 --
 .../package/templates/exclude_hosts_list.j2     |    3 -
 .../services/HIVE/configuration/global.xml      |  148 --
 .../services/HIVE/configuration/hive-site.xml   |  236 ---
 .../stacks/HDP/1.3.4/services/HIVE/metainfo.xml |  186 ---
 .../services/HIVE/package/files/addMysqlUser.sh |   41 -
 .../services/HIVE/package/files/hcatSmoke.sh    |   35 -
 .../services/HIVE/package/files/hiveSmoke.sh    |   23 -
 .../services/HIVE/package/files/hiveserver2.sql |   23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |   31 -
 .../services/HIVE/package/files/pigSmoke.sh     |   18 -
 .../HIVE/package/files/startHiveserver2.sh      |   22 -
 .../HIVE/package/files/startMetastore.sh        |   22 -
 .../services/HIVE/package/scripts/__init__.py   |   19 -
 .../1.3.4/services/HIVE/package/scripts/hcat.py |   47 -
 .../HIVE/package/scripts/hcat_client.py         |   41 -
 .../HIVE/package/scripts/hcat_service_check.py  |   63 -
 .../1.3.4/services/HIVE/package/scripts/hive.py |  122 --
 .../HIVE/package/scripts/hive_client.py         |   41 -
 .../HIVE/package/scripts/hive_metastore.py      |   63 -
 .../HIVE/package/scripts/hive_server.py         |   63 -
 .../HIVE/package/scripts/hive_service.py        |   56 -
 .../HIVE/package/scripts/mysql_server.py        |   77 -
 .../HIVE/package/scripts/mysql_service.py       |   38 -
 .../services/HIVE/package/scripts/params.py     |  123 --
 .../HIVE/package/scripts/service_check.py       |   56 -
 .../HIVE/package/scripts/status_params.py       |   30 -
 .../HIVE/package/templates/hcat-env.sh.j2       |   25 -
 .../HIVE/package/templates/hive-env.sh.j2       |   55 -
 .../1.3.4/services/HUE/configuration/global.xml |   35 -
 .../services/HUE/configuration/hue-site.xml     |  290 ----
 .../stacks/HDP/1.3.4/services/HUE/metainfo.xml  |   32 -
 .../configuration/capacity-scheduler.xml        |  195 ---
 .../MAPREDUCE/configuration/core-site.xml       |   20 -
 .../services/MAPREDUCE/configuration/global.xml |  160 ---
 .../configuration/mapred-queue-acls.xml         |   39 -
 .../MAPREDUCE/configuration/mapred-site.xml     |  601 --------
 .../HDP/1.3.4/services/MAPREDUCE/metainfo.xml   |  102 --
 .../MAPREDUCE/package/scripts/client.py         |   43 -
 .../MAPREDUCE/package/scripts/historyserver.py  |   59 -
 .../MAPREDUCE/package/scripts/jobtracker.py     |  104 --
 .../MAPREDUCE/package/scripts/mapreduce.py      |   50 -
 .../MAPREDUCE/package/scripts/params.py         |   54 -
 .../MAPREDUCE/package/scripts/service.py        |   56 -
 .../MAPREDUCE/package/scripts/service_check.py  |   89 --
 .../MAPREDUCE/package/scripts/status_params.py  |   33 -
 .../MAPREDUCE/package/scripts/tasktracker.py    |  104 --
 .../package/templates/exclude_hosts_list.j2     |    3 -
 .../services/NAGIOS/configuration/global.xml    |   50 -
 .../HDP/1.3.4/services/NAGIOS/metainfo.xml      |  106 --
 .../NAGIOS/package/files/check_aggregate.php    |  243 ----
 .../services/NAGIOS/package/files/check_cpu.pl  |  114 --
 .../package/files/check_datanode_storage.php    |  100 --
 .../NAGIOS/package/files/check_hdfs_blocks.php  |  115 --
 .../package/files/check_hdfs_capacity.php       |  109 --
 .../files/check_hive_metastore_status.sh        |   45 -
 .../NAGIOS/package/files/check_hue_status.sh    |   31 -
 .../files/check_mapred_local_dir_used.sh        |   34 -
 .../package/files/check_name_dir_status.php     |   93 --
 .../NAGIOS/package/files/check_namenodes_ha.sh  |   82 --
 .../package/files/check_nodemanager_health.sh   |   44 -
 .../NAGIOS/package/files/check_oozie_status.sh  |   45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |  104 --
 .../package/files/check_templeton_status.sh     |   45 -
 .../NAGIOS/package/files/check_webui.sh         |   87 --
 .../NAGIOS/package/files/hdp_nagios_init.php    |   81 --
 .../NAGIOS/package/scripts/functions.py         |   31 -
 .../services/NAGIOS/package/scripts/nagios.py   |   97 --
 .../NAGIOS/package/scripts/nagios_server.py     |   87 --
 .../package/scripts/nagios_server_config.py     |   91 --
 .../NAGIOS/package/scripts/nagios_service.py    |   36 -
 .../services/NAGIOS/package/scripts/params.py   |  168 ---
 .../NAGIOS/package/scripts/status_params.py     |   26 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |   91 --
 .../package/templates/hadoop-commands.cfg.j2    |  114 --
 .../package/templates/hadoop-hostgroups.cfg.j2  |   33 -
 .../package/templates/hadoop-hosts.cfg.j2       |   34 -
 .../templates/hadoop-servicegroups.cfg.j2       |   98 --
 .../package/templates/hadoop-services.cfg.j2    |  714 ---------
 .../NAGIOS/package/templates/nagios.cfg.j2      | 1349 ------------------
 .../NAGIOS/package/templates/nagios.conf.j2     |   62 -
 .../services/NAGIOS/package/templates/nagios.j2 |  146 --
 .../NAGIOS/package/templates/resource.cfg.j2    |   51 -
 .../services/OOZIE/configuration/global.xml     |  105 --
 .../services/OOZIE/configuration/oozie-site.xml |  237 ---
 .../HDP/1.3.4/services/OOZIE/metainfo.xml       |  113 --
 .../services/OOZIE/package/files/oozieSmoke.sh  |   93 --
 .../OOZIE/package/files/wrap_ooziedb.sh         |   31 -
 .../services/OOZIE/package/scripts/oozie.py     |   99 --
 .../OOZIE/package/scripts/oozie_client.py       |   53 -
 .../OOZIE/package/scripts/oozie_server.py       |   65 -
 .../OOZIE/package/scripts/oozie_service.py      |   45 -
 .../services/OOZIE/package/scripts/params.py    |   64 -
 .../OOZIE/package/scripts/service_check.py      |   47 -
 .../OOZIE/package/scripts/status_params.py      |   26 -
 .../OOZIE/package/templates/oozie-env.sh.j2     |   64 -
 .../package/templates/oozie-log4j.properties.j2 |   74 -
 .../services/PIG/configuration/pig.properties   |   52 -
 .../stacks/HDP/1.3.4/services/PIG/metainfo.xml  |   61 -
 .../services/PIG/package/files/pigSmoke.sh      |   18 -
 .../services/PIG/package/scripts/params.py      |   36 -
 .../1.3.4/services/PIG/package/scripts/pig.py   |   46 -
 .../services/PIG/package/scripts/pig_client.py  |   52 -
 .../PIG/package/scripts/service_check.py        |   75 -
 .../PIG/package/templates/log4j.properties.j2   |   30 -
 .../PIG/package/templates/pig-env.sh.j2         |   17 -
 .../PIG/package/templates/pig.properties.j2     |   55 -
 .../HDP/1.3.4/services/SQOOP/metainfo.xml       |   77 -
 .../services/SQOOP/package/scripts/__init__.py  |   18 -
 .../services/SQOOP/package/scripts/params.py    |   36 -
 .../SQOOP/package/scripts/service_check.py      |   36 -
 .../services/SQOOP/package/scripts/sqoop.py     |   51 -
 .../SQOOP/package/scripts/sqoop_client.py       |   40 -
 .../SQOOP/package/templates/sqoop-env.sh.j2     |   36 -
 .../WEBHCAT/configuration/webhcat-site.xml      |  126 --
 .../HDP/1.3.4/services/WEBHCAT/metainfo.xml     |   97 --
 .../WEBHCAT/package/files/templetonSmoke.sh     |   95 --
 .../WEBHCAT/package/scripts/__init__.py         |   21 -
 .../services/WEBHCAT/package/scripts/params.py  |   51 -
 .../WEBHCAT/package/scripts/service_check.py    |   45 -
 .../WEBHCAT/package/scripts/status_params.py    |   26 -
 .../services/WEBHCAT/package/scripts/webhcat.py |  120 --
 .../WEBHCAT/package/scripts/webhcat_server.py   |   54 -
 .../WEBHCAT/package/scripts/webhcat_service.py  |   41 -
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |   44 -
 .../services/ZOOKEEPER/configuration/global.xml |   75 -
 .../HDP/1.3.4/services/ZOOKEEPER/metainfo.xml   |   72 -
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |   96 --
 .../ZOOKEEPER/package/files/zkServer.sh         |  120 --
 .../ZOOKEEPER/package/files/zkService.sh        |   26 -
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |   78 -
 .../ZOOKEEPER/package/scripts/__init__.py       |   21 -
 .../ZOOKEEPER/package/scripts/params.py         |   71 -
 .../ZOOKEEPER/package/scripts/service_check.py  |   47 -
 .../ZOOKEEPER/package/scripts/status_params.py  |   26 -
 .../ZOOKEEPER/package/scripts/zookeeper.py      |   92 --
 .../package/scripts/zookeeper_client.py         |   43 -
 .../package/scripts/zookeeper_server.py         |   55 -
 .../package/scripts/zookeeper_service.py        |   43 -
 .../package/templates/configuration.xsl.j2      |   37 -
 .../package/templates/log4j.properties.j2       |   71 -
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |   51 -
 .../package/templates/zookeeper-env.sh.j2       |   25 -
 .../templates/zookeeper_client_jaas.conf.j2     |   22 -
 .../package/templates/zookeeper_jaas.conf.j2    |   25 -
 260 files changed, 1403 insertions(+), 21441 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml
index 13eba83..bebb54e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/FLUME/metainfo.xml
@@ -24,6 +24,7 @@
         <component>
             <name>FLUME_SERVER</name>
             <category>MASTER</category>
+            <cardinality>1</cardinality>
         </component>
     </components>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml
index 1a895b8..09d78a6 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/metainfo.xml
@@ -16,29 +16,91 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Ganglia Metrics Collection system</comment>
-    <version>3.5.0</version>
-
-    <components>
-        <component>
-            <name>GANGLIA_SERVER</name>
-            <category>MASTER</category>
-        </component>
-
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GANGLIA</name>
+      <comment>Ganglia Metrics Collection system</comment>
+      <version>3.5.0</version>
+      <components>
         <component>
-            <name>GANGLIA_MONITOR</name>
-            <category>SLAVE</category>
+          <name>GANGLIA_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/ganglia_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>MONITOR_WEBSERVER</name>
-            <category>MASTER</category>
+          <name>GANGLIA_MONITOR</name>
+          <category>SLAVE</category>
+          <cardinality>ALL</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/ganglia_monitor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-
-  <configuration-dependencies>
-    <config-type>global</config-type>
-  </configuration-dependencies>
-
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>libganglia-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmetad-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-web-3.5.7-99.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>python-rrdtool.x86_64</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-modules-python-3.5.0-99</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <package>
+            <type>rpm</type>
+            <name>apache2</name>
+          </package>
+          <package>
+            <type>rpm</type>
+            <name>apache2-mod_php5</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <package>
+            <type>rpm</type>
+            <name>httpd</name>
+          </package>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml
index 6643782..4c610db 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/metainfo.xml
@@ -16,29 +16,108 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>mapred</user>
-    <comment>Non-relational distributed database and centralized service for configuration management &amp; synchronization</comment>
-    <version>0.94.6.1.3.3.0</version>
-
-    <components>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.94.6.1.3.3.0</version>
+      <components>
         <component>
-            <name>HBASE_MASTER</name>
-            <category>MASTER</category>
+          <name>HBASE_MASTER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>HDFS/HDFS_CLIENT</name>
+              <scope>host</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+              </auto-deploy>
+            </dependency>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HBASE/HBASE_MASTER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>HBASE_REGIONSERVER</name>
-            <category>SLAVE</category>
+          <name>HBASE_REGIONSERVER</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_regionserver.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>HBASE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HBASE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hbase-site</config-type>
-      <config-type>hbase-policy</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/configuration/global.xml
deleted file mode 100644
index b0c7eb6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/configuration/global.xml
+++ /dev/null
@@ -1,45 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hcat_log_dir</name>
-    <value>/var/log/webhcat</value>
-    <description>WebHCat Log Dir.</description>
-  </property>
-  <property>
-    <name>hcat_pid_dir</name>
-    <value>/var/run/webhcat</value>
-    <description>WebHCat Pid Dir.</description>
-  </property>
-  <property>
-    <name>hcat_user</name>
-    <value>hcat</value>
-    <description>HCat User.</description>
-  </property>
-  <property>
-    <name>webhcat_user</name>
-    <value>hcat</value>
-    <description>WebHCat User.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/metainfo.xml
deleted file mode 100644
index 8e78530..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HCATALOG/metainfo.xml
+++ /dev/null
@@ -1,30 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <user>root</user>
-    <comment>This is comment for HCATALOG service</comment>
-    <version>0.11.0.1.3.3.0</version>
-
-    <components>
-        <component>
-            <name>HCAT</name>
-            <category>CLIENT</category>
-        </component>
-    </components>
-
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
index ac76122..1fc6c59 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/configuration/hdfs-site.xml
@@ -22,7 +22,7 @@
 
 <configuration>
 
-<!-- file system properties -->
+  <!-- file system properties -->
 
   <property>
     <name>dfs.name.dir</name>
@@ -49,7 +49,7 @@
     <final>true</final>
   </property>
 
- <property>
+  <property>
     <name>dfs.datanode.socket.write.timeout</name>
     <value>0</value>
     <description>DFS Client write socket timeout</description>
@@ -66,7 +66,7 @@
     <name>dfs.block.local-path-access.user</name>
     <value>hbase</value>
     <description>the user who is allowed to perform short
-    circuit reads.
+      circuit reads.
     </description>
     <final>true</final>
   </property>
@@ -75,11 +75,11 @@
     <name>dfs.data.dir</name>
     <value>/hadoop/hdfs/data</value>
     <description>Determines where on the local filesystem an DFS data node
-  should store its blocks.  If this is a comma-delimited
-  list of directories, then data will be stored in all named
-  directories, typically on different devices.
-  Directories that do not exist are ignored.
-  </description>
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
     <final>true</final>
   </property>
 
@@ -87,32 +87,32 @@
     <name>dfs.hosts.exclude</name>
     <value>/etc/hadoop/conf/dfs.exclude</value>
     <description>Names a file that contains a list of hosts that are
-    not permitted to connect to the namenode.  The full pathname of the
-    file must be specified.  If the value is empty, no hosts are
-    excluded.</description>
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
   </property>
 
   <property>
     <name>dfs.hosts</name>
     <value>/etc/hadoop/conf/dfs.include</value>
     <description>Names a file that contains a list of hosts that are
-    permitted to connect to the namenode. The full pathname of the file
-    must be specified.  If the value is empty, all hosts are
-    permitted.</description>
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
   </property>
 
   <property>
     <name>dfs.replication.max</name>
     <value>50</value>
     <description>Maximal block replication.
-  </description>
+    </description>
   </property>
 
   <property>
     <name>dfs.replication</name>
     <value>3</value>
     <description>Default block replication.
-  </description>
+    </description>
   </property>
 
   <property>
@@ -125,21 +125,21 @@
     <name>dfs.safemode.threshold.pct</name>
     <value>1.0f</value>
     <description>
-        Specifies the percentage of blocks that should satisfy
-        the minimal replication requirement defined by dfs.replication.min.
-        Values less than or equal to 0 mean not to start in safe mode.
-        Values greater than 1 will make safe mode permanent.
-        </description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
   </property>
 
   <property>
     <name>dfs.balance.bandwidthPerSec</name>
     <value>6250000</value>
     <description>
-        Specifies the maximum amount of bandwidth that each datanode
-        can utilize for the balancing purpose in term of
-        the number of bytes per second.
-  </description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in term of
+      the number of bytes per second.
+    </description>
   </property>
 
   <property>
@@ -191,133 +191,133 @@
   <property>
     <name>dfs.http.address</name>
     <value>localhost:50070</value>
-<description>The name of the default file system.  Either the
-literal string "local" or a host:port for NDFS.</description>
-<final>true</final>
-</property>
-
-<property>
-<name>dfs.datanode.du.reserved</name>
-<!-- cluster variant -->
-<value>1073741824</value>
-<description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-</description>
-</property>
-
-<property>
-<name>dfs.datanode.ipc.address</name>
-<value>0.0.0.0:8010</value>
-<description>
-The datanode ipc server address and port.
-If the port is 0 then the server will start on a free port.
-</description>
-</property>
-
-<property>
-<name>dfs.blockreport.initialDelay</name>
-<value>120</value>
-<description>Delay for first block report in seconds.</description>
-</property>
-
-<property>
-<name>dfs.datanode.du.pct</name>
-<value>0.85f</value>
-<description>When calculating remaining space, only use this percentage of the real available space
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>40</value>
-<description>The number of server threads for the namenode.</description>
-</property>
-
-<property>
-<name>dfs.datanode.max.xcievers</name>
-<value>4096</value>
-<description>PRIVATE CONFIG VARIABLE</description>
-</property>
-
-<!-- Permissions configuration -->
-
-<property>
-<name>dfs.umaskmode</name>
-<value>077</value>
-<description>
-The octal umask used when creating files and directories.
-</description>
-</property>
-
-<property>
-<name>dfs.web.ugi</name>
-<!-- cluster variant -->
-<value>gopher,gopher</value>
-<description>The user account used by the web interface.
-Syntax: USERNAME,GROUP1,GROUP2, ...
-</description>
-</property>
-
-<property>
-<name>dfs.permissions</name>
-<value>true</value>
-<description>
-If "true", enable permission checking in HDFS.
-If "false", permission checking is turned off,
-but all other behavior is unchanged.
-Switching from one parameter value to the other does not change the mode,
-owner or group of files or directories.
-</description>
-</property>
-
-<property>
-<name>dfs.permissions.supergroup</name>
-<value>hdfs</value>
-<description>The name of the group of super-users.</description>
-</property>
-
-<property>
-<name>dfs.namenode.handler.count</name>
-<value>100</value>
-<description>Added to grow Queue size so that more client connections are allowed</description>
-</property>
-
-<property>
-<name>ipc.server.max.response.size</name>
-<value>5242880</value>
-</property>
-<property>
-<name>dfs.block.access.token.enable</name>
-<value>true</value>
-<description>
-If "true", access tokens are used as capabilities for accessing datanodes.
-If "false", no access tokens are checked on accessing datanodes.
-</description>
-</property>
-
-<property>
-<name>dfs.namenode.kerberos.principal</name>
-<value></value>
-<description>
-Kerberos principal name for the NameNode
-</description>
-</property>
-
-<property>
-<name>dfs.secondary.namenode.kerberos.principal</name>
-<value></value>
+    <description>The name of the default file system.  Either the
+      literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
     <description>
-        Kerberos principal name for the secondary NameNode.
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
     </description>
   </property>
 
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.pct</name>
+    <value>0.85f</value>
+    <description>When calculating remaining space, only use this percentage of the real available space
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.xcievers</name>
+    <value>4096</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>dfs.umaskmode</name>
+    <value>077</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.ugi</name>
+    <!-- cluster variant -->
+    <value>gopher,gopher</value>
+    <description>The user account used by the web interface.
+      Syntax: USERNAME,GROUP1,GROUP2, ...
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.supergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+  </property>
+
+  <property>
+    <name>ipc.server.max.response.size</name>
+    <value>5242880</value>
+  </property>
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
 
-<!--
-  This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
--->
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
   <property>
     <name>dfs.namenode.kerberos.https.principal</name>
     <value></value>
-     <description>The Kerberos principal for the host that the NameNode runs on.</description>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
 
   </property>
 
@@ -363,84 +363,84 @@ Kerberos principal name for the NameNode
   <property>
     <name>dfs.datanode.kerberos.principal</name>
     <value></value>
- <description>
-        The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
     </description>
   </property>
 
   <property>
     <name>dfs.namenode.keytab.file</name>
     <value></value>
- <description>
-        Combined keytab file containing the namenode service and host principals.
+    <description>
+      Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.secondary.namenode.keytab.file</name>
     <value></value>
-  <description>
-        Combined keytab file containing the namenode service and host principals.
+    <description>
+      Combined keytab file containing the namenode service and host principals.
     </description>
   </property>
 
   <property>
     <name>dfs.datanode.keytab.file</name>
     <value></value>
- <description>
-        The filename of the keytab file for the DataNode.
+    <description>
+      The filename of the keytab file for the DataNode.
     </description>
   </property>
 
   <property>
     <name>dfs.https.port</name>
     <value>50470</value>
- <description>The https port where namenode binds</description>
+    <description>The https port where namenode binds</description>
 
   </property>
 
   <property>
     <name>dfs.https.address</name>
     <value>localhost:50470</value>
-  <description>The https address where namenode binds</description>
+    <description>The https address where namenode binds</description>
 
   </property>
 
   <property>
     <name>dfs.datanode.data.dir.perm</name>
     <value>750</value>
-<description>The permissions that should be there on dfs.data.dir
-directories. The datanode will not come up if the permissions are
-different on existing dfs.data.dir directories. If the directories
-don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-  <name>dfs.access.time.precision</name>
-  <value>0</value>
-  <description>The access time for HDFS file is precise upto this value.
-               The default value is 1 hour. Setting a value of 0 disables
-               access times for HDFS.
-  </description>
-</property>
-
-<property>
- <name>dfs.cluster.administrators</name>
- <value> hdfs</value>
- <description>ACL for who all can view the default servlets in the HDFS</description>
-</property>
-
-<property>
-  <name>ipc.server.read.threadpool.size</name>
-  <value>5</value>
-  <description></description>
-</property>
-
-<property>
-  <name>dfs.datanode.failed.volumes.tolerated</name>
-  <value>0</value>
-  <description>Number of failed disks datanode would tolerate</description>
-</property>
+    <description>The permissions that should be there on dfs.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.access.time.precision</name>
+    <value>0</value>
+    <description>The access time for HDFS file is precise upto this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+  </property>
+
+  <property>
+    <name>ipc.server.read.threadpool.size</name>
+    <value>5</value>
+    <description></description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description>Number of failed disks datanode would tolerate</description>
+  </property>
 
   <property>
     <name>dfs.namenode.avoid.read.stale.datanode</name>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
index 0bbab3e..009acae 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/metainfo.xml
@@ -16,35 +16,131 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Apache Hadoop Distributed File System</comment>
-    <version>1.2.0.1.3.3.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>1.2.0.1.3.3.0</version>
 
-    <components>
+      <components>
         <component>
-            <name>NAMENODE</name>
-            <category>MASTER</category>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>DATANODE</name>
-            <category>SLAVE</category>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>SECONDARY_NAMENODE</name>
-            <category>MASTER</category>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>HDFS_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>hdfs-site</config-type>
-      <config-type>hadoop-policy</config-type>
-    </configuration-dependencies>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-pipes</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-sbin</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
 </metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml
index d9adc80..ae7f586 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/global.xml
@@ -121,5 +121,28 @@
     <value>hive</value>
     <description>Hive User.</description>
   </property>
+
+  <!--HCAT-->
+
+  <property>
+    <name>hcat_log_dir</name>
+    <value>/var/log/webhcat</value>
+    <description>WebHCat Log Dir.</description>
+  </property>
+  <property>
+    <name>hcat_pid_dir</name>
+    <value>/etc/run/webhcat</value>
+    <description>WebHCat Pid Dir.</description>
+  </property>
+  <property>
+    <name>hcat_user</name>
+    <value>hcat</value>
+    <description>HCat User.</description>
+  </property>
+  <property>
+    <name>webhcat_user</name>
+    <value>hcat</value>
+    <description>WebHCat User.</description>
+  </property>
   
 </configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
index 24de30b..29ed54e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/configuration/hive-site.xml
@@ -58,21 +58,21 @@ limitations under the License.
     <name>hive.metastore.sasl.enabled</name>
     <value></value>
     <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
+      Clients must authenticate with Kerberos.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.keytab.file</name>
     <value></value>
     <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
+      thrift server's service principal.</description>
   </property>
 
   <property>
     <name>hive.metastore.kerberos.principal</name>
     <value></value>
     <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
+      string _HOST will be replaced automatically with the correct host name.</description>
   </property>
 
   <property>
@@ -115,7 +115,7 @@ limitations under the License.
     <name>hive.security.authorization.manager</name>
     <value>org.apache.hcatalog.security.HdfsAuthorizationProvider</value>
     <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+      The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
   </property>
 
   <property>
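The reindented descriptions in this hunk mention the special _HOST token in hive.metastore.kerberos.principal. As a quick, self-contained illustration of that substitution (the principal and realm below are made up for the example, not taken from this commit):

  import socket

  def resolve_principal(principal):
    # Hadoop-style services replace the literal token _HOST with the
    # fully-qualified host name of the local machine.
    return principal.replace("_HOST", socket.getfqdn())

  # e.g. "hive/_HOST@EXAMPLE.COM" -> "hive/metastore01.example.com@EXAMPLE.COM"
  print(resolve_principal("hive/_HOST@EXAMPLE.COM"))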

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml
index afeaae1..0a0f8fa 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/metainfo.xml
@@ -16,30 +16,171 @@
    limitations under the License.
 -->
 <metainfo>
-    <user>root</user>
-    <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-    <version>0.11.0.1.3.3.0</version>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.11.0.1.3.3.0</version>
+      <components>
 
-    <components>        
         <component>
-            <name>HIVE_METASTORE</name>
-            <category>MASTER</category>
+          <name>HIVE_METASTORE</name>
+          <category>MASTER</category>
+          <!-- may be 0 if specifying external metastore, how to specify this? -->
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <dependencies>
+            <dependency>
+              <name>ZOOKEEPER/ZOOKEEPER_SERVER</name>
+              <scope>cluster</scope>
+              <auto-deploy>
+                <enabled>true</enabled>
+                <co-locate>HIVE/HIVE_SERVER</co-locate>
+              </auto-deploy>
+            </dependency>
+          </dependencies>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
         <component>
-            <name>HIVE_SERVER</name>
-            <category>MASTER</category>
+          <name>MYSQL_SERVER</name>
+          <category>MASTER</category>
+          <!-- may be 0 if specifying external db, how to specify this? -->
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>HIVE/HIVE_SERVER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+
         <component>
-            <name>MYSQL_SERVER</name>
-            <category>MASTER</category>
+          <name>HIVE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>HCATALOG</name>
+      <comment>This is comment for HCATALOG service</comment>
+      <version>0.11.0.1.3.3.0</version>
+      <components>
         <component>
-            <name>HIVE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>HCAT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>global</config-type>
-      <config-type>hive-site</config-type>
-    </configuration-dependencies>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+
+    </service>
+
+  </services>
 </metainfo>
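Each component above now points at a PYTHON commandScript (hive_metastore.py, hive_server.py, mysql_server.py, hive_client.py, hcat_client.py). Purely as a sketch of the lifecycle those scripts are expected to expose -- not the code added by this commit, with the helpers in comments being hypothetical -- a component script built on the resource_management Script class looks roughly like:

  from resource_management import Script

  class HiveMetastore(Script):
    def install(self, env):
      # Install the RPMs declared in <osSpecifics> (hive, mysql-connector-java, ...).
      self.install_packages(env)

    def configure(self, env):
      # In the real scripts a sibling params.py is imported and handed to
      # env.set_params(); omitted here to keep the sketch self-contained.
      pass

    def start(self, env):
      self.configure(env)
      # Start the metastore process (e.g. via a hypothetical hive_service() helper).

    def stop(self, env):
      pass  # Stop the metastore process.

    def status(self, env):
      pass  # Check the pid file; raise an error if the process is not running.

  if __name__ == "__main__":
    HiveMetastore().execute()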

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml
index ba580ca..0a6b59e 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HUE/metainfo.xml
@@ -25,6 +25,7 @@
         <component>
             <name>HUE_SERVER</name>
             <category>MASTER</category>
+            <cardinality>1</cardinality>
         </component>
     </components>
 

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
index c4f6e39..1db37a8 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/configuration/mapred-site.xml
@@ -22,7 +22,7 @@
 
 <configuration xmlns:xi="http://www.w3.org/2001/XInclude">
 
-<!-- i/o properties -->
+  <!-- i/o properties -->
 
   <property>
     <name>io.sort.mb</name>
@@ -50,25 +50,25 @@
     <description>No description</description>
   </property>
 
-<!-- map/reduce properties -->
+  <!-- map/reduce properties -->
 
-<property>
-  <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
-  <value>250</value>
-  <description>Normally, this is the amount of time before killing
-  processes, and the recommended-default is 5.000 seconds - a value of
-  5000 here.  In this case, we are using it solely to blast tasks before
-  killing them, and killing them very quickly (1/4 second) to guarantee
-  that we do not leave VMs around for later jobs.
-  </description>
-</property>
+  <property>
+    <name>mapred.tasktracker.tasks.sleeptime-before-sigkill</name>
+    <value>250</value>
+    <description>Normally, this is the amount of time before killing
+      processes, and the recommended-default is 5.000 seconds - a value of
+      5000 here.  In this case, we are using it solely to blast tasks before
+      killing them, and killing them very quickly (1/4 second) to guarantee
+      that we do not leave VMs around for later jobs.
+    </description>
+  </property>
 
   <property>
     <name>mapred.job.tracker.handler.count</name>
     <value>50</value>
     <description>
-    The number of server threads for the JobTracker. This should be roughly
-    4% of the number of tasktracker nodes.
+      The number of server threads for the JobTracker. This should be roughly
+      4% of the number of tasktracker nodes.
     </description>
   </property>
 
@@ -104,8 +104,8 @@
   </property>
 
   <property>
-  <name>mapreduce.cluster.administrators</name>
-  <value> hadoop</value>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
   </property>
 
   <property>
@@ -135,14 +135,14 @@
     <name>mapred.map.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some map tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
     <name>mapred.reduce.tasks.speculative.execution</name>
     <value>false</value>
     <description>If true, then multiple instances of some reduce tasks
-               may be executed in parallel.</description>
+      may be executed in parallel.</description>
   </property>
 
   <property>
@@ -154,29 +154,29 @@
     <name>mapred.inmem.merge.threshold</name>
     <value>1000</value>
     <description>The threshold, in terms of the number of files
-  for the in-memory merge process. When we accumulate threshold number of files
-  we initiate the in-memory merge and spill to disk. A value of 0 or less than
-  0 indicates we want to DON'T have any threshold and instead depend only on
-  the ramfs's memory consumption to trigger the merge.
-  </description>
+      for the in-memory merge process. When we accumulate threshold number of files
+      we initiate the in-memory merge and spill to disk. A value of 0 or less than
+      0 indicates we want to DON'T have any threshold and instead depend only on
+      the ramfs's memory consumption to trigger the merge.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.merge.percent</name>
     <value>0.66</value>
     <description>The usage threshold at which an in-memory merge will be
-  initiated, expressed as a percentage of the total memory allocated to
-  storing in-memory map outputs, as defined by
-  mapred.job.shuffle.input.buffer.percent.
-  </description>
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapred.job.shuffle.input.buffer.percent.
+    </description>
   </property>
 
   <property>
     <name>mapred.job.shuffle.input.buffer.percent</name>
     <value>0.7</value>
     <description>The percentage of memory to be allocated from the maximum heap
-  size to storing map outputs during the shuffle.
-  </description>
+      size to storing map outputs during the shuffle.
+    </description>
   </property>
 
   <property>
@@ -187,13 +187,13 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.output.compression.type</name>
-  <value>BLOCK</value>
-  <description>If the job outputs are to compressed as SequenceFiles, how should
-               they be compressed? Should be one of NONE, RECORD or BLOCK.
-  </description>
-</property>
+  <property>
+    <name>mapred.output.compression.type</name>
+    <value>BLOCK</value>
+    <description>If the job outputs are to compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
 
 
   <property>
@@ -210,7 +210,7 @@
     <name>mapred.jobtracker.restart.recover</name>
     <value>false</value>
     <description>"true" to enable (job) recovery upon restart,
-               "false" to start afresh
+      "false" to start afresh
     </description>
   </property>
 
@@ -218,20 +218,20 @@
     <name>mapred.job.reduce.input.buffer.percent</name>
     <value>0.0</value>
     <description>The percentage of memory- relative to the maximum heap size- to
-  retain map outputs during the reduce. When the shuffle is concluded, any
-  remaining map outputs in memory must consume less than this threshold before
-  the reduce can begin.
-  </description>
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
   </property>
 
- <property>
-  <name>mapreduce.reduce.input.limit</name>
-  <value>10737418240</value>
-  <description>The limit on the input size of the reduce. (This value
-  is 10 Gb.)  If the estimated input size of the reduce is greater than
-  this value, job is failed. A value of -1 means that there is no limit
-  set. </description>
-</property>
+  <property>
+    <name>mapreduce.reduce.input.limit</name>
+    <value>10737418240</value>
+    <description>The limit on the input size of the reduce. (This value
+      is 10 Gb.)  If the estimated input size of the reduce is greater than
+      this value, job is failed. A value of -1 means that there is no limit
+      set. </description>
+  </property>
 
 
   <!-- copied from kryptonite configuration -->
@@ -245,9 +245,9 @@
     <name>mapred.task.timeout</name>
     <value>600000</value>
     <description>The number of milliseconds before a task will be
-  terminated if it neither reads an input, writes an output, nor
-  updates its status string.
-  </description>
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
   </property>
 
   <property>
@@ -259,9 +259,9 @@
   <property>
     <name>mapred.task.tracker.task-controller</name>
     <value>org.apache.hadoop.mapred.DefaultTaskController</value>
-   <description>
-     TaskController which is used to launch and manage task execution.
-  </description>
+    <description>
+      TaskController which is used to launch and manage task execution.
+    </description>
   </property>
 
   <property>
@@ -279,7 +279,6 @@
   <property>
     <name>mapred.child.java.opts</name>
     <value>-server -Xmx${ambari.mapred.child.java.opts.memory}m -Djava.net.preferIPv4Stack=true</value>
-
     <description>Java options for the TaskTracker child processes</description>
   </property>
 
@@ -295,7 +294,7 @@
     <name>mapred.cluster.reduce.memory.mb</name>
     <value>2048</value>
     <description>
-    The virtual memory size of a single Reduce slot in the MapReduce framework
+      The virtual memory size of a single Reduce slot in the MapReduce framework
     </description>
   </property>
 
@@ -331,147 +330,147 @@
     </description>
   </property>
 
-<property>
-  <name>mapred.hosts</name>
-  <value>/etc/hadoop/conf/mapred.include</value>
-  <description>
-    Names a file that contains the list of nodes that may
-    connect to the jobtracker.  If the value is empty, all hosts are
-    permitted.
-  </description>
-</property>
-
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value>/etc/hadoop/conf/mapred.exclude</value>
-  <description>
-    Names a file that contains the list of hosts that
-    should be excluded by the jobtracker.  If the value is empty, no
-    hosts are excluded.
-  </description>
-</property>
-
-<property>
-  <name>mapred.max.tracker.blacklists</name>
-  <value>16</value>
-  <description>
-    if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.path</name>
-  <value>/etc/hadoop/conf/health_check</value>
-  <description>
-    Directory path to view job status
-  </description>
-</property>
-
-<property>
-  <name>mapred.healthChecker.interval</name>
-  <value>135000</value>
-</property>
-
-<property>
-  <name>mapred.healthChecker.script.timeout</name>
-  <value>60000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.active</name>
-  <value>false</value>
-  <description>Indicates if persistency of job status information is
-  active or not.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.hours</name>
-  <value>1</value>
-  <description>The number of hours job status information is persisted in DFS.
-    The job status information will be available after it drops of the memory
-    queue and between jobtracker restarts. With a zero value the job status
-    information is not persisted at all in DFS.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.tracker.persist.jobstatus.dir</name>
-  <value>/mapred/jobstatus</value>
-  <description>The directory where the job status information is persisted
-   in a file system to be available after it drops of the memory queue and
-   between jobtracker restarts.
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.check</name>
-  <value>10000</value>
-</property>
-
-<property>
-  <name>mapred.jobtracker.retirejob.interval</name>
-  <value>21600000</value>
-</property>
-
-<property>
-  <name>mapred.job.tracker.history.completed.location</name>
-  <value>/mapred/history/done</value>
-  <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.task.maxvmem</name>
-  <value></value>
-  <final>true</final>
-   <description>No description</description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.maxtasks.per.job</name>
-  <value>-1</value>
-  <final>true</final>
-  <description>The maximum number of tasks for a single job.
-  A value of -1 indicates that there is no maximum.  </description>
-</property>
-
-<property>
-  <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
-  <value>false</value>
-</property>
-
-<property>
-  <name>mapred.userlog.retain.hours</name>
-  <value>24</value>
-  <description>
-    The maximum time, in hours, for which the user-logs are to be retained after the job completion.
-  </description>
-</property>
-
-<property>
-  <name>mapred.job.reuse.jvm.num.tasks</name>
-  <value>1</value>
-  <description>
-    How many tasks to run per jvm. If set to -1, there is no limit
-  </description>
-  <final>true</final>
-</property>
-
-<property>
-  <name>mapreduce.jobtracker.kerberos.principal</name>
-  <value></value>
-  <description>
+  <property>
+    <name>mapred.hosts</name>
+    <value>/etc/hadoop/conf/mapred.include</value>
+    <description>
+      Names a file that contains the list of nodes that may
+      connect to the jobtracker.  If the value is empty, all hosts are
+      permitted.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.hosts.exclude</name>
+    <value>/etc/hadoop/conf/mapred.exclude</value>
+    <description>
+      Names a file that contains the list of hosts that
+      should be excluded by the jobtracker.  If the value is empty, no
+      hosts are excluded.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.max.tracker.blacklists</name>
+    <value>16</value>
+    <description>
+      if node is reported blacklisted by 16 successful jobs within timeout-window, it will be graylisted
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.path</name>
+    <value>/etc/hadoop/conf/health_check</value>
+    <description>
+      Directory path to view job status
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.interval</name>
+    <value>135000</value>
+  </property>
+
+  <property>
+    <name>mapred.healthChecker.script.timeout</name>
+    <value>60000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.active</name>
+    <value>false</value>
+    <description>Indicates if persistency of job status information is
+      active or not.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.hours</name>
+    <value>1</value>
+    <description>The number of hours job status information is persisted in DFS.
+      The job status information will be available after it drops of the memory
+      queue and between jobtracker restarts. With a zero value the job status
+      information is not persisted at all in DFS.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.persist.jobstatus.dir</name>
+    <value>/mapred/jobstatus</value>
+    <description>The directory where the job status information is persisted
+      in a file system to be available after it drops of the memory queue and
+      between jobtracker restarts.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.check</name>
+    <value>10000</value>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.retirejob.interval</name>
+    <value>21600000</value>
+  </property>
+
+  <property>
+    <name>mapred.job.tracker.history.completed.location</name>
+    <value>/mapred/history/done</value>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.task.maxvmem</name>
+    <value></value>
+    <final>true</final>
+    <description>No description</description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.maxtasks.per.job</name>
+    <value>-1</value>
+    <final>true</final>
+    <description>The maximum number of tasks for a single job.
+      A value of -1 indicates that there is no maximum.  </description>
+  </property>
+
+  <property>
+    <name>mapreduce.fileoutputcommitter.marksuccessfuljobs</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapred.userlog.retain.hours</name>
+    <value>24</value>
+    <description>
+      The maximum time, in hours, for which the user-logs are to be retained after the job completion.
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.job.reuse.jvm.num.tasks</name>
+    <value>1</value>
+    <description>
+      How many tasks to run per jvm. If set to -1, there is no limit
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>mapreduce.jobtracker.kerberos.principal</name>
+    <value></value>
+    <description>
       JT user name key.
- </description>
-</property>
+    </description>
+  </property>
 
-<property>
-  <name>mapreduce.tasktracker.kerberos.principal</name>
-   <value></value>
-  <description>
-       tt user name key. "_HOST" is replaced by the host name of the task tracker.
-   </description>
-</property>
+  <property>
+    <name>mapreduce.tasktracker.kerberos.principal</name>
+    <value></value>
+    <description>
+      tt user name key. "_HOST" is replaced by the host name of the task tracker.
+    </description>
+  </property>
 
 
   <property>
@@ -481,54 +480,54 @@
   </property>
 
 
- <property>
-   <name>mapreduce.jobtracker.keytab.file</name>
-   <value></value>
-   <description>
-       The keytab for the jobtracker principal.
-   </description>
+  <property>
+    <name>mapreduce.jobtracker.keytab.file</name>
+    <value></value>
+    <description>
+      The keytab for the jobtracker principal.
+    </description>
 
-</property>
+  </property>
 
- <property>
-   <name>mapreduce.tasktracker.keytab.file</name>
-   <value></value>
+  <property>
+    <name>mapreduce.tasktracker.keytab.file</name>
+    <value></value>
     <description>The filename of the keytab for the task tracker</description>
- </property>
+  </property>
 
- <property>
-   <name>mapred.task.tracker.http.address</name>
-   <value></value>
-   <description>Http address for task tracker.</description>
- </property>
+  <property>
+    <name>mapred.task.tracker.http.address</name>
+    <value></value>
+    <description>Http address for task tracker.</description>
+  </property>
 
- <property>
-   <name>mapreduce.jobtracker.staging.root.dir</name>
-   <value>/user</value>
- <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
-   name. It is a path in the default file system.</description>
- </property>
+  <property>
+    <name>mapreduce.jobtracker.staging.root.dir</name>
+    <value>/user</value>
+    <description>The Path prefix for where the staging directories should be placed. The next level is always the user's
+      name. It is a path in the default file system.</description>
+  </property>
 
- <property>
-      <name>mapreduce.tasktracker.group</name>
-      <value>hadoop</value>
-      <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
+  <property>
+    <name>mapreduce.tasktracker.group</name>
+    <value>hadoop</value>
+    <description>The group that the task controller uses for accessing the task controller. The mapred user must be a member and users should *not* be members.</description>
 
- </property>
+  </property>
 
   <property>
     <name>mapreduce.jobtracker.split.metainfo.maxsize</name>
     <value>50000000</value>
     <final>true</final>
-     <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
-    initialize.
-   </description>
+    <description>If the size of the split metainfo file is larger than this, the JobTracker will fail the job during
+      initialize.
+    </description>
   </property>
   <property>
     <name>mapreduce.history.server.embedded</name>
     <value>false</value>
     <description>Should job history server be embedded within Job tracker
-process</description>
+      process</description>
     <final>true</final>
   </property>
 
@@ -543,61 +542,60 @@ process</description>
   <property>
     <name>mapreduce.jobhistory.kerberos.principal</name>
     <!-- cluster variant -->
-  <value></value>
+    <value></value>
     <description>Job history user name key. (must map to same user as JT
-user)</description>
+      user)</description>
   </property>
 
- <property>
-   <name>mapreduce.jobhistory.keytab.file</name>
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
     <!-- cluster variant -->
-   <value></value>
-   <description>The keytab for the job history server principal.</description>
- </property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
-  <value>180</value>
-  <description>
-    3-hour sliding window (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
-  <value>15</value>
-  <description>
-    15-minute bucket size (value is in minutes)
-  </description>
-</property>
-
-<property>
-  <name>mapred.queue.names</name>
-  <value>default</value>
-  <description> Comma separated list of queues configured for this jobtracker.</description>
-</property>
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
 
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-timeout-window</name>
+    <value>180</value>
+    <description>
+      3-hour sliding window (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.jobtracker.blacklist.fault-bucket-width</name>
+    <value>15</value>
+    <description>
+      15-minute bucket size (value is in minutes)
+    </description>
+  </property>
+
+  <property>
+    <name>mapred.queue.names</name>
+    <value>default</value>
+    <description> Comma separated list of queues configured for this jobtracker.</description>
+  </property>
   
-<property>
-  <name>mapreduce.jobhistory.intermediate-done-dir</name>
-  <value>/mr-history/tmp</value>
-  <description>
-    Directory where history files are written by MapReduce jobs.
-  </description>
-</property>
-
-<property>
-  <name>mapreduce.jobhistory.done-dir</name>
-  <value>/mr-history/done</value>
-  <description>
-    Directory where history files are managed by the MR JobHistory Server.
-  </description>
-</property>
-
-<property>       
-  <name>mapreduce.jobhistory.webapp.address</name>
-  <value>localhost:19888</value>
-  <description>Enter your JobHistoryServer hostname.</description>
-</property>
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+
+  <property>       
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
 
 </configuration>
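The reflow above is whitespace-only and leaves the property set unchanged; on the agent side these key/value pairs are what a configure() step writes back out as mapred-site.xml. A hedged sketch using the XmlConfig resource from the python libraries -- the conf dir, ownership and mode below are assumptions for the example, not values from this commit:

  from resource_management import Script, XmlConfig

  config = Script.get_config()

  XmlConfig("mapred-site.xml",
            conf_dir="/etc/hadoop/conf",    # assumed conf dir
            configurations=config['configurations']['mapred-site'],
            owner="mapred",                 # assumed owner/group
            group="hadoop",
            mode=0o644)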

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
index 2493a13..71783d7 100644
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/metainfo.xml
@@ -15,30 +15,88 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<metainfo>
-    <user>mapred</user>
-    <comment>Apache Hadoop Distributed Processing Framework</comment>
-    <version>1.2.0.1.3.3.0</version>
 
-    <components>
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>MAPREDUCE</name>
+      <comment>Apache Hadoop Distributed Processing Framework</comment>
+      <version>1.2.0.1.3.3.0</version>
+      <components>
         <component>
-            <name>JOBTRACKER</name>
-            <category>MASTER</category>
+          <name>JOBTRACKER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <commandScript>
+            <script>scripts/jobtracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/jobtracker.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
         </component>
 
         <component>
-            <name>TASKTRACKER</name>
-            <category>SLAVE</category>
+          <name>TASKTRACKER</name>
+          <category>SLAVE</category>
+          <cardinality>1+</cardinality>
+          <commandScript>
+            <script>scripts/tasktracker.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
 
         <component>
-            <name>MAPREDUCE_CLIENT</name>
-            <category>CLIENT</category>
+          <name>MAPREDUCE_CLIENT</name>
+          <category>CLIENT</category>
+          <cardinality>0+</cardinality> 
+          <commandScript>
+            <script>scripts/client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+              
+        <component>
+          <name>HISTORYSERVER</name>
+          <category>MASTER</category>
+          <cardinality>1</cardinality>
+          <auto-deploy>
+            <enabled>true</enabled>
+            <co-locate>MAPREDUCE/JOBTRACKER</co-locate>
+          </auto-deploy>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
         </component>
-    </components>
-    <configuration-dependencies>
-      <config-type>core-site</config-type>
-      <config-type>global</config-type>
-      <config-type>mapred-site</config-type>
-    </configuration-dependencies>
+      </components>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
 </metainfo>
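The JOBTRACKER component above gains a DECOMMISSION customCommand routed to the same scripts/jobtracker.py. By convention such a custom command is dispatched to a matching lower-cased method on the component's Script subclass; the sketch below is illustrative only, and the exact mradmin invocation and service user are assumptions rather than code from this commit.

  from resource_management import Script, Execute

  class JobTracker(Script):
    def decommission(self, env):
      # Refresh the JobTracker's include/exclude host lists so it stops
      # scheduling tasks on the nodes being decommissioned.
      Execute("hadoop mradmin -refreshNodes",
              user="mapred")   # assumed service user

  if __name__ == "__main__":
    JobTracker().execute()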


[03/12] AMBARI-4336. Move 1.3.4 stack to 1.3.3 using the python libraries. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index acb2522..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1349 +0,0 @@
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-# You can specify individual object config files as shown below:
-cfg_file=/etc/nagios/objects/commands.cfg
-cfg_file=/etc/nagios/objects/contacts.cfg
-cfg_file=/etc/nagios/objects/timeperiods.cfg
-cfg_file=/etc/nagios/objects/templates.cfg
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file=/etc/nagios/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file=/etc/nagios/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file=/etc/nagios/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file=/etc/nagios/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir=/etc/nagios/servers
-#cfg_dir=/etc/nagios/printers
-#cfg_dir=/etc/nagios/switches
-#cfg_dir=/etc/nagios/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This options determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can speed up the time needed to (re)start 
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find our more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-#  restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works of the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (i.e. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This settings is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# by loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are seperated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that  a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) which check
-# result files are considered to be valid.  Files older than this 
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently that the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently that the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic test
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of service when it predicts that future dependency logic test
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether its a soft or hard state type), enable this option. 
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc., but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular intervals, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.  If you
-# want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ORs (sums) of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host checking is, or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# process_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# process_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the defult append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have a distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of view of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=1
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# to detect hosts and services that are "flapping".
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enable tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=0
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processes used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#          0 = Nothing
-#	   1 = Functions
-#          2 = Configuration
-#          4 = Process information
-#	   8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#          128 = External commands
-#          256 = Commands
-#          512 = Scheduled downtime
-#          1024 = Comments
-#          2048 = Macros
-
-debug_level=0
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-
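
For reference, the retained attribute masks described in the nagios.cfg above are built by combining MODATTR_* flag values. A minimal Python sketch, assuming only the two flag values quoted in the comment (8 and 16), reproduces the example mask of 24:

    # Flag values quoted in the nagios.cfg comment (from include/common.h);
    # treat them as illustrative rather than a complete list.
    MODATTR_EVENT_HANDLER_ENABLED = 8
    MODATTR_FLAP_DETECTION_ENABLED = 16

    # Since each flag is a distinct bit, summing and bitwise OR give the same
    # result; the mask tells Nagios which attributes NOT to retain on restart.
    retained_host_attribute_mask = (MODATTR_EVENT_HANDLER_ENABLED
                                    | MODATTR_FLAP_DETECTION_ENABLED)
    assert retained_host_attribute_mask == 24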

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index d8936a0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
-
-<Directory "/usr/lib/nagios/cgi">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "/usr/share/nagios"
-
-<Directory "/usr/share/nagios">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index 01e21ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="/etc/nagios/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?
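
The {{nagios_pid_file}} and {{nagios_user}} placeholders in the init script above are Jinja2 variables that Ambari fills in when the template is written out. A rough sketch of that substitution with the standard jinja2 package (the values below are placeholders; the real ones come from the stack's params):

    from jinja2 import Template

    # Placeholder values; in Ambari they are supplied by the service's params.
    params = {
        "nagios_pid_file": "/var/nagios/nagios.pid",
        "nagios_user": "nagios",
    }

    # Render the template and write the concrete init script.
    with open("nagios.j2") as src:
        rendered = Template(src.read()).render(**params)
    with open("/etc/init.d/nagios", "w") as dst:
        dst.write(rendered)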

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 920bfae..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file
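
The $USERx$ macros defined in resource.cfg are plain string substitutions that Nagios applies when expanding command definitions. A toy illustration of the idea (the check command line below is hypothetical):

    # Hypothetical macro value; $USER1$ corresponds to {{plugins_dir}} above.
    resources = {"$USER1$": "/usr/lib64/nagios/plugins"}

    command_line = "$USER1$/check_ping -H $HOSTADDRESS$ -w 3000.0,80% -c 5000.0,100%"
    for macro, value in resources.items():
        command_line = command_line.replace(macro, value)
    print(command_line)  # the plugin path is now expanded in the command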

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/global.xml
deleted file mode 100644
index ddbf780..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/global.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>oozie_user</name>
-    <value>oozie</value>
-    <description>Oozie User.</description>
-  </property>
-  <property>
-    <name>oozieserver_host</name>
-    <value></value>
-    <description>Oozie Server Host.</description>
-  </property>
-  <property>
-    <name>oozie_database</name>
-    <value></value>
-    <description>Oozie Server Database.</description>
-  </property>
-  <property>
-    <name>oozie_derby_database</name>
-    <value>Derby</value>
-    <description>Oozie Derby Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_database</name>
-    <value>MySQL</value>
-    <description>Oozie MySQL Database.</description>
-  </property>
-  <property>
-    <name>oozie_existing_mysql_host</name>
-    <value></value>
-    <description>Existing MySQL Host.</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_database</name>
-    <value>Oracle</value>
-    <description>Oracle Database</description>
-  </property>
-  <property>
-    <name>oozie_existing_oracle_host</name>
-    <value></value>
-    <description>Database Host.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_database</name>
-    <value>MySQL</value>
-    <description>Database default.</description>
-  </property>
-  <property>
-    <name>oozie_ambari_host</name>
-    <value></value>
-    <description>Host on which the database will be created.</description>
-  </property>
-  <property>
-    <name>oozie_database_name</name>
-    <value>oozie</value>
-    <description>Database name used for Oozie.</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_name</name>
-    <value>oozie</value>
-    <description>Database user name to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_metastore_user_passwd</name>
-    <value></value>
-    <description>Database password to use to connect to the database</description>
-  </property>
-  <property>
-    <name>oozie_data_dir</name>
-    <value>/hadoop/oozie/data</value>
-    <description>Data directory in which the Oozie DB exists</description>
-  </property>
-  <property>
-    <name>oozie_log_dir</name>
-    <value>/var/log/oozie</value>
-    <description>Directory for oozie logs</description>
-  </property>
-  <property>
-    <name>oozie_pid_dir</name>
-    <value>/var/run/oozie</value>
-    <description>Directory in which the pid files for oozie reside.</description>
-  </property>
-
-</configuration>
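
These global properties are what the stack's Python scripts read back through the command JSON, in the same style as the oozie.py later in this patch. A hedged sketch of that lookup (the shape of the config dictionary is assumed):

    # Assumed shape of the command JSON handed to Script subclasses.
    config = {
        "configurations": {
            "global": {
                "oozie_user": "oozie",
                "oozie_log_dir": "/var/log/oozie",
                "oozie_pid_dir": "/var/run/oozie",
                "oozie_data_dir": "/hadoop/oozie/data",
            }
        }
    }

    # params-style lookups keyed by the property names in global.xml above.
    oozie_user = config["configurations"]["global"]["oozie_user"]
    oozie_log_dir = config["configurations"]["global"]["oozie_log_dir"]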

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index 57239c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,237 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for  Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.authorization.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc.).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for a normal job.
-      -1 means an infinite timeout.</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie DataBase Name
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will automatically
-      include the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>DEFAULT</value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is resolved within
-          the Oozie configuration directory; the path can also be absolute (e.g. to point
-          to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>org.apache.oozie.action.email.EmailActionExecutor,
-org.apache.oozie.action.hadoop.HiveActionExecutor,
-org.apache.oozie.action.hadoop.ShellActionExecutor,
-org.apache.oozie.action.hadoop.SqoopActionExecutor,
-org.apache.oozie.action.hadoop.DistcpActionExecutor</value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates the Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the schema already exists, this is a no-op.
-            If set to false, it does not create the DB schema, and startup fails if the schema does not exist.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-            DB user name.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-</configuration>
\ No newline at end of file
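
The oozie.service.HadoopAccessorService.hadoop.configurations value above is a comma-separated list of AUTHORITY=HADOOP_CONF_DIR pairs, with '*' used when no authority matches. A small sketch of that lookup behavior (it mirrors the description, not Oozie's actual implementation):

    def resolve_conf_dir(prop_value, authority):
        """Return the Hadoop conf dir for a HOST:PORT authority, falling back to '*'."""
        mapping = {}
        for entry in prop_value.split(","):
            auth, _, conf_dir = entry.strip().partition("=")
            mapping[auth] = conf_dir
        return mapping.get(authority, mapping.get("*"))

    # With the default "*=/etc/hadoop/conf", every authority resolves to /etc/hadoop/conf.
    print(resolve_conf_dir("*=/etc/hadoop/conf", "namenode.example.com:8020"))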

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/metainfo.xml
deleted file mode 100644
index 487104d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,113 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>OOZIE</name>
-      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>3.3.2.1.3.3.0</version>
-      <components>
-        <component>
-          <name>OOZIE_SERVER</name>
-          <category>MASTER</category>
-          <cardinality>1</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>OOZIE_CLIENT</name>
-          <category>CLIENT</category>
-          <cardinality>0+</cardinality>
-          <dependencies>
-            <dependency>
-              <name>HDFS/HDFS_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-            <dependency>
-              <name>MAPREDUCE/MAPREDUCE_CLIENT</name>
-              <scope>host</scope>
-              <auto-deploy>
-                <enabled>true</enabled>
-              </auto-deploy>
-            </dependency>
-          </dependencies>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>oozie.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>oozie-client.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>extjs-2.2-1</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>oozie-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
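
The metainfo.xml above is what tells Ambari which components the service has, their cardinality, and which command scripts to run. A standalone sketch for inspecting such a file with the standard library (not Ambari code):

    import xml.etree.ElementTree as ET

    root = ET.parse("metainfo.xml").getroot()
    for component in root.iter("component"):
        print("%s: category=%s, cardinality=%s, script=%s" % (
            component.findtext("name"),
            component.findtext("category"),
            component.findtext("cardinality"),
            component.findtext("commandScript/script"),
        ))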

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/oozieSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/oozieSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/oozieSmoke.sh
deleted file mode 100644
index 2446544..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/oozieSmoke.sh
+++ /dev/null
@@ -1,93 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the couner and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-      elif [ "SUCCEEDED" == "$act_status" ]; then
-        rc=0;
-        break;
-      else
-        rc=1
-        break;
-      fi
-    done
-    return $rc
-}
-
-export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export kinit_path_local=$6
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-su - ${smoke_test_user} -c "hadoop dfs -rmr examples"
-su - ${smoke_test_user} -c "hadoop dfs -rmr input-data"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id"
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE
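
checkOozieJobStatus above polls `oozie job -info` until the workflow leaves the RUNNING state. The same polling loop sketched in Python (the command and output parsing follow the shell script; the exact output format is an assumption):

    import subprocess
    import time

    def check_oozie_job_status(job_id, oozie_server, num_of_tries=10, wait_secs=15):
        """Poll an Oozie workflow; return True only if it reaches SUCCEEDED."""
        cmd = ["/usr/bin/oozie", "job", "-oozie", oozie_server, "-info", job_id]
        for _ in range(num_of_tries):
            output = subprocess.check_output(cmd, universal_newlines=True)
            status_lines = [l for l in output.splitlines() if l.startswith("Status")]
            status = status_lines[0].split(":", 1)[1].strip() if status_lines else ""
            if status != "RUNNING":
                return status == "SUCCEEDED"
            time.sleep(wait_secs)  # still running; wait and poll again
        return False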

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/wrap_ooziedb.sh
deleted file mode 100644
index 97a513c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/files/wrap_ooziedb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
-EC=$?
-echo $OUT
-GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
-if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
-then
-  exit 0
-else
-  exit $EC
-fi  
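
[Editor's note, not part of the patch: wrap_ooziedb.sh treats a failure caused solely by an already-created schema as success, so re-running the DB setup stays idempotent. A rough Python sketch of the same convention, reusing the path and marker string from the script above:]

  import subprocess

  def run_ooziedb(args):
      proc = subprocess.Popen(["/usr/lib/oozie/bin/ooziedb.sh"] + list(args),
                              cwd="/var/tmp/oozie",
                              stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
      out = proc.communicate()[0]
      print out
      # A non-zero exit caused only by the "schema already exists" complaint is fine.
      if proc.returncode != 0 and "java.lang.Exception: DB schema exists" in out:
          return 0
      return proc.returncode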

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie.py
deleted file mode 100644
index 91da7ae..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from resource_management import *
-
-def oozie(is_server=False):
-  import params
-
-  XmlConfig( "oozie-site.xml",
-    conf_dir = params.conf_dir, 
-    configurations = params.config['configurations']['oozie-site'],
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0664
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
-  )
-  
-  TemplateConfig( format("{conf_dir}/oozie-log4j.properties"),
-    owner = params.oozie_user
-  )
-
-  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
-    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
-     -o {check_db_connection_jar_name}'"),
-      not_if  = format("[ -f {check_db_connection_jar} ]")
-    )
-    
-  oozie_ownership( )
-  
-  if is_server:      
-    oozie_server_specific( )
-  
-def oozie_ownership():
-  import params
-  
-  File ( format("{conf_dir}/adminusers.txt"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/hadoop-config.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/oozie-default.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  Directory ( format("{conf_dir}/action-conf"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/action-conf/hive.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-def oozie_server_specific():
-  import params
-  
-  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
-  Directory( oozie_server_directories,
-    owner = params.oozie_user,
-    mode = 0755,
-    recursive = True
-  )
-       
-  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
-  
-  # this is different for HDP2
-  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir}")
-  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
-    cmd3 += format(" && mkdir -p {oozie_libext_dir} && cp {jdbc_driver_jar} {oozie_libext_dir}")
-    
-  # this is different for HDP2
-  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 {hadoop_jar_location} -extjs {ext_js_path} {jar_option} {jar_path}")
-  
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  Execute( [cmd1, cmd2, cmd3],
-    not_if  = no_op_test
-  )
-  Execute( cmd4,
-    user = params.oozie_user,
-    not_if  = no_op_test
-  )
-  
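
[Editor's note, illustration only: oozie_server_specific() above keeps its setup commands idempotent through resource_management's not_if guard, which skips an Execute resource whenever the guard command succeeds. A minimal sketch of that pattern in isolation; the pid file path below is hypothetical, not taken from params.]

  from resource_management import Execute, format

  pid_file = "/var/run/oozie/oozie.pid"   # hypothetical path, for illustration only
  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")

  # Only unpack the sharelib when no live Oozie server process is found.
  Execute("cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz",
          not_if = no_op_test)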

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_client.py
deleted file mode 100644
index 23fdc12..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_client.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=False)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/92583535/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_server.py
deleted file mode 100644
index eca2a56..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/1.3.4/services/OOZIE/package/scripts/oozie_server.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=True)
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    oozie_service(action='start')
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    oozie_service(action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieServer().execute()