Posted to commits@ambari.apache.org by ma...@apache.org on 2014/01/18 00:39:57 UTC

[01/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Updated Branches:
  refs/heads/trunk 92583535d -> ae534ed30


http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/log4j.properties.j2
new file mode 100644
index 0000000..db69564
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/log4j.properties.j2
@@ -0,0 +1,71 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=DEBUG
+log4j.appender.ROLLINGFILE.File=zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
new file mode 100644
index 0000000..5b68218
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zoo.cfg.j2
@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The number of milliseconds of each tick
+tickTime={{tickTime}}
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit={{initLimit}}
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit={{syncLimit}}
+# the directory where the snapshot is stored.
+dataDir={{zk_data_dir}}
+# the port at which the clients will connect
+clientPort={{clientPort}}
+{% for host in zookeeper_hosts %}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
+
+{% if security_enabled %}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}
+
+{% if zoo_cfg_properties_map_length > 0 %}
+# Custom properties
+{% endif %}
+{% for key, value in zoo_cfg_properties_map.iteritems() %}
+{{key}}={{value}}
+{% endfor %}
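
The host loop above emits one "server.N" line per ZooKeeper host, with loop.index supplying the 1-based server id. A minimal rendering sketch follows; the host names are hypothetical, and trim_blocks is used only to keep the sample output compact, not to claim how Ambari invokes the template:

    from jinja2 import Template

    # Excerpt of the host loop from zoo.cfg.j2 above.
    snippet = (
        "{% for host in zookeeper_hosts %}\n"
        "server.{{loop.index}}={{host}}:2888:3888\n"
        "{% endfor %}\n"
    )

    # Hypothetical ensemble members, for illustration only.
    hosts = ["zk1.example.com", "zk2.example.com", "zk3.example.com"]
    print(Template(snippet, trim_blocks=True).render(zookeeper_hosts=hosts))
    # server.1=zk1.example.com:2888:3888
    # server.2=zk2.example.com:2888:3888
    # server.3=zk3.example.com:2888:3888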

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
new file mode 100644
index 0000000..493a2a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export JAVA_HOME={{java64_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
new file mode 100644
index 0000000..696718e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
@@ -0,0 +1,5 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
new file mode 100644
index 0000000..aa123e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
@@ -0,0 +1,8 @@
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{zk_keytab_path}}"
+principal="{{zk_principal}}";
+};
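
For context, a sketch of what the Server section above produces once rendered; the keytab path and principal below are placeholder values, not Ambari defaults:

    from jinja2 import Template

    jaas_template = (
        "Server {\n"
        "com.sun.security.auth.module.Krb5LoginModule required\n"
        "useKeyTab=true\n"
        "storeKey=true\n"
        "useTicketCache=false\n"
        'keyTab="{{zk_keytab_path}}"\n'
        'principal="{{zk_principal}}";\n'
        "};\n"
    )

    # Hypothetical Kerberos identity for a ZooKeeper server, for illustration only.
    print(Template(jaas_template).render(
        zk_keytab_path="/etc/security/keytabs/zk.service.keytab",
        zk_principal="zookeeper/zk1.example.com@EXAMPLE.COM",
    ))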


[37/37] git commit: AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)


Project: http://git-wip-us.apache.org/repos/asf/ambari/repo
Commit: http://git-wip-us.apache.org/repos/asf/ambari/commit/ae534ed3
Tree: http://git-wip-us.apache.org/repos/asf/ambari/tree/ae534ed3
Diff: http://git-wip-us.apache.org/repos/asf/ambari/diff/ae534ed3

Branch: refs/heads/trunk
Commit: ae534ed3089f2650748c9563009007cae13d8e99
Parents: 9258353
Author: Mahadev Konar <ma...@apache.org>
Authored: Fri Jan 17 15:34:30 2014 -0800
Committer: Mahadev Konar <ma...@apache.org>
Committed: Fri Jan 17 15:34:30 2014 -0800

----------------------------------------------------------------------
 ambari-server/pom.xml                           |     2 +-
 ambari-server/set-hdp-repo-url.sh               |     2 +-
 .../before-INSTALL/files/changeToSecureUid.sh   |    50 +
 .../1.3.3/hooks/before-INSTALL/scripts/hook.py  |    36 +
 .../hooks/before-INSTALL/scripts/params.py      |    81 +
 .../scripts/shared_initialization.py            |   107 +
 .../hooks/before-START/files/checkForFormat.sh  |    62 +
 .../before-START/files/task-log4j.properties    |   132 +
 .../1.3.3/hooks/before-START/scripts/hook.py    |    37 +
 .../1.3.3/hooks/before-START/scripts/params.py  |   172 +
 .../scripts/shared_initialization.py            |   322 +
 .../templates/commons-logging.properties.j2     |    25 +
 .../templates/exclude_hosts_list.j2             |     3 +
 .../before-START/templates/hadoop-env.sh.j2     |   121 +
 .../templates/hadoop-metrics2.properties.j2     |    45 +
 .../hooks/before-START/templates/hdfs.conf.j2   |    17 +
 .../before-START/templates/health_check-v2.j2   |    91 +
 .../before-START/templates/health_check.j2      |   118 +
 .../templates/include_hosts_list.j2             |     3 +
 .../before-START/templates/log4j.properties.j2  |   200 +
 .../hooks/before-START/templates/slaves.j2      |     3 +
 .../hooks/before-START/templates/snmpd.conf.j2  |    48 +
 .../templates/taskcontroller.cfg.j2             |    20 +
 .../GANGLIA/package/files/checkGmetad.sh        |    37 +
 .../GANGLIA/package/files/checkGmond.sh         |    62 +
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 +
 .../services/GANGLIA/package/files/gmetad.init  |    73 +
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 +
 .../services/GANGLIA/package/files/gmond.init   |    73 +
 .../services/GANGLIA/package/files/gmondLib.sh  |   545 +
 .../1.3.3/services/GANGLIA/package/files/rrd.py |   213 +
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 +
 .../GANGLIA/package/files/setupGanglia.sh       |   141 +
 .../GANGLIA/package/files/startGmetad.sh        |    64 +
 .../GANGLIA/package/files/startGmond.sh         |    80 +
 .../GANGLIA/package/files/startRrdcached.sh     |    69 +
 .../GANGLIA/package/files/stopGmetad.sh         |    43 +
 .../services/GANGLIA/package/files/stopGmond.sh |    54 +
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 +
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 +
 .../services/GANGLIA/package/scripts/ganglia.py |   106 +
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   163 +
 .../package/scripts/ganglia_monitor_service.py  |    31 +
 .../GANGLIA/package/scripts/ganglia_server.py   |   181 +
 .../package/scripts/ganglia_server_service.py   |    27 +
 .../services/GANGLIA/package/scripts/params.py  |    74 +
 .../GANGLIA/package/scripts/status_params.py    |    25 +
 .../package/templates/gangliaClusters.conf.j2   |    34 +
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    24 +
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    62 +
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    32 +
 .../services/HBASE/package/scripts/__init__.py  |    19 +
 .../services/HBASE/package/scripts/functions.py |    67 +
 .../services/HBASE/package/scripts/hbase.py     |    91 +
 .../HBASE/package/scripts/hbase_client.py       |    52 +
 .../HBASE/package/scripts/hbase_master.py       |    74 +
 .../HBASE/package/scripts/hbase_regionserver.py |    75 +
 .../HBASE/package/scripts/hbase_service.py      |    46 +
 .../services/HBASE/package/scripts/params.py    |    84 +
 .../HBASE/package/scripts/service_check.py      |    89 +
 .../HBASE/package/scripts/status_params.py      |    25 +
 .../hadoop-metrics.properties-GANGLIA-MASTER.j2 |    50 +
 .../hadoop-metrics.properties-GANGLIA-RS.j2     |    50 +
 .../templates/hadoop-metrics.properties.j2      |    50 +
 .../HBASE/package/templates/hbase-env.sh.j2     |    82 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    26 +
 .../package/templates/hbase_client_jaas.conf.j2 |    23 +
 .../templates/hbase_grant_permissions.j2        |    21 +
 .../package/templates/hbase_master_jaas.conf.j2 |    25 +
 .../templates/hbase_regionserver_jaas.conf.j2   |    25 +
 .../HBASE/package/templates/regionservers.j2    |     2 +
 .../HDFS/package/files/checkForFormat.sh        |    62 +
 .../services/HDFS/package/files/checkWebUI.py   |    53 +
 .../services/HDFS/package/scripts/datanode.py   |    57 +
 .../HDFS/package/scripts/hdfs_client.py         |    52 +
 .../HDFS/package/scripts/hdfs_datanode.py       |    59 +
 .../HDFS/package/scripts/hdfs_namenode.py       |   192 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |    53 +
 .../services/HDFS/package/scripts/namenode.py   |    66 +
 .../services/HDFS/package/scripts/params.py     |   165 +
 .../HDFS/package/scripts/service_check.py       |   106 +
 .../services/HDFS/package/scripts/snamenode.py  |    64 +
 .../HDFS/package/scripts/status_params.py       |    31 +
 .../services/HDFS/package/scripts/utils.py      |   133 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../services/HIVE/package/files/addMysqlUser.sh |    41 +
 .../services/HIVE/package/files/hcatSmoke.sh    |    35 +
 .../services/HIVE/package/files/hiveSmoke.sh    |    23 +
 .../services/HIVE/package/files/hiveserver2.sql |    23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |    31 +
 .../services/HIVE/package/files/pigSmoke.sh     |    18 +
 .../HIVE/package/files/startHiveserver2.sh      |    22 +
 .../HIVE/package/files/startMetastore.sh        |    22 +
 .../services/HIVE/package/scripts/__init__.py   |    19 +
 .../1.3.3/services/HIVE/package/scripts/hcat.py |    47 +
 .../HIVE/package/scripts/hcat_client.py         |    41 +
 .../HIVE/package/scripts/hcat_service_check.py  |    63 +
 .../1.3.3/services/HIVE/package/scripts/hive.py |   122 +
 .../HIVE/package/scripts/hive_client.py         |    41 +
 .../HIVE/package/scripts/hive_metastore.py      |    63 +
 .../HIVE/package/scripts/hive_server.py         |    63 +
 .../HIVE/package/scripts/hive_service.py        |    56 +
 .../HIVE/package/scripts/mysql_server.py        |    77 +
 .../HIVE/package/scripts/mysql_service.py       |    38 +
 .../services/HIVE/package/scripts/params.py     |   123 +
 .../HIVE/package/scripts/service_check.py       |    56 +
 .../HIVE/package/scripts/status_params.py       |    30 +
 .../HIVE/package/templates/hcat-env.sh.j2       |    25 +
 .../HIVE/package/templates/hive-env.sh.j2       |    55 +
 .../MAPREDUCE/package/scripts/client.py         |    43 +
 .../MAPREDUCE/package/scripts/historyserver.py  |    59 +
 .../MAPREDUCE/package/scripts/jobtracker.py     |   104 +
 .../MAPREDUCE/package/scripts/mapreduce.py      |    50 +
 .../MAPREDUCE/package/scripts/params.py         |    54 +
 .../MAPREDUCE/package/scripts/service.py        |    56 +
 .../MAPREDUCE/package/scripts/service_check.py  |    89 +
 .../MAPREDUCE/package/scripts/status_params.py  |    33 +
 .../MAPREDUCE/package/scripts/tasktracker.py    |   104 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../NAGIOS/package/files/check_aggregate.php    |   243 +
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 +
 .../package/files/check_datanode_storage.php    |   100 +
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   115 +
 .../package/files/check_hdfs_capacity.php       |   109 +
 .../files/check_hive_metastore_status.sh        |    45 +
 .../NAGIOS/package/files/check_hue_status.sh    |    31 +
 .../files/check_mapred_local_dir_used.sh        |    34 +
 .../package/files/check_name_dir_status.php     |    93 +
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    82 +
 .../package/files/check_nodemanager_health.sh   |    44 +
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 +
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 +
 .../package/files/check_templeton_status.sh     |    45 +
 .../NAGIOS/package/files/check_webui.sh         |    87 +
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 +
 .../NAGIOS/package/scripts/functions.py         |    31 +
 .../services/NAGIOS/package/scripts/nagios.py   |    97 +
 .../NAGIOS/package/scripts/nagios_server.py     |    87 +
 .../package/scripts/nagios_server_config.py     |    91 +
 .../NAGIOS/package/scripts/nagios_service.py    |    36 +
 .../services/NAGIOS/package/scripts/params.py   |   168 +
 .../NAGIOS/package/scripts/status_params.py     |    26 +
 .../NAGIOS/package/templates/contacts.cfg.j2    |    91 +
 .../package/templates/hadoop-commands.cfg.j2    |   114 +
 .../package/templates/hadoop-hostgroups.cfg.j2  |    33 +
 .../package/templates/hadoop-hosts.cfg.j2       |    34 +
 .../templates/hadoop-servicegroups.cfg.j2       |    98 +
 .../package/templates/hadoop-services.cfg.j2    |   714 +
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1349 ++
 .../NAGIOS/package/templates/nagios.conf.j2     |    62 +
 .../services/NAGIOS/package/templates/nagios.j2 |   146 +
 .../NAGIOS/package/templates/resource.cfg.j2    |    51 +
 .../services/OOZIE/package/files/oozieSmoke.sh  |    93 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 +
 .../services/OOZIE/package/scripts/oozie.py     |    99 +
 .../OOZIE/package/scripts/oozie_client.py       |    53 +
 .../OOZIE/package/scripts/oozie_server.py       |    65 +
 .../OOZIE/package/scripts/oozie_service.py      |    45 +
 .../services/OOZIE/package/scripts/params.py    |    64 +
 .../OOZIE/package/scripts/service_check.py      |    47 +
 .../OOZIE/package/scripts/status_params.py      |    26 +
 .../OOZIE/package/templates/oozie-env.sh.j2     |    64 +
 .../package/templates/oozie-log4j.properties.j2 |    74 +
 .../services/PIG/package/files/pigSmoke.sh      |    18 +
 .../services/PIG/package/scripts/params.py      |    36 +
 .../1.3.3/services/PIG/package/scripts/pig.py   |    46 +
 .../services/PIG/package/scripts/pig_client.py  |    52 +
 .../PIG/package/scripts/service_check.py        |    75 +
 .../PIG/package/templates/log4j.properties.j2   |    30 +
 .../PIG/package/templates/pig-env.sh.j2         |    17 +
 .../PIG/package/templates/pig.properties.j2     |    55 +
 .../services/SQOOP/package/scripts/__init__.py  |    18 +
 .../services/SQOOP/package/scripts/params.py    |    36 +
 .../SQOOP/package/scripts/service_check.py      |    36 +
 .../services/SQOOP/package/scripts/sqoop.py     |    51 +
 .../SQOOP/package/scripts/sqoop_client.py       |    40 +
 .../SQOOP/package/templates/sqoop-env.sh.j2     |    36 +
 .../WEBHCAT/package/files/templetonSmoke.sh     |    95 +
 .../WEBHCAT/package/scripts/__init__.py         |    21 +
 .../services/WEBHCAT/package/scripts/params.py  |    51 +
 .../WEBHCAT/package/scripts/service_check.py    |    45 +
 .../WEBHCAT/package/scripts/status_params.py    |    26 +
 .../services/WEBHCAT/package/scripts/webhcat.py |   120 +
 .../WEBHCAT/package/scripts/webhcat_server.py   |    54 +
 .../WEBHCAT/package/scripts/webhcat_service.py  |    41 +
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |    44 +
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 +
 .../ZOOKEEPER/package/files/zkService.sh        |    26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 +
 .../ZOOKEEPER/package/scripts/__init__.py       |    21 +
 .../ZOOKEEPER/package/scripts/params.py         |    71 +
 .../ZOOKEEPER/package/scripts/service_check.py  |    47 +
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |    92 +
 .../package/scripts/zookeeper_client.py         |    43 +
 .../package/scripts/zookeeper_server.py         |    55 +
 .../package/scripts/zookeeper_service.py        |    43 +
 .../package/templates/configuration.xsl.j2      |    37 +
 .../package/templates/log4j.properties.j2       |    71 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    51 +
 .../package/templates/zookeeper-env.sh.j2       |    25 +
 .../templates/zookeeper_client_jaas.conf.j2     |    22 +
 .../package/templates/zookeeper_jaas.conf.j2    |    25 +
 .../stacks/HDP/2.0.6/repos/repoinfo.xml         |    16 +-
 .../before-INSTALL/files/changeToSecureUid.sh   |    50 -
 .../2.0.8/hooks/before-INSTALL/scripts/hook.py  |    36 -
 .../hooks/before-INSTALL/scripts/params.py      |    84 -
 .../scripts/shared_initialization.py            |   113 -
 .../hooks/before-START/files/checkForFormat.sh  |    62 -
 .../before-START/files/task-log4j.properties    |   132 -
 .../2.0.8/hooks/before-START/scripts/hook.py    |    37 -
 .../2.0.8/hooks/before-START/scripts/params.py  |   172 -
 .../scripts/shared_initialization.py            |   327 -
 .../templates/commons-logging.properties.j2     |    25 -
 .../templates/exclude_hosts_list.j2             |     3 -
 .../before-START/templates/hadoop-env.sh.j2     |   121 -
 .../templates/hadoop-metrics2.properties.j2     |    45 -
 .../hooks/before-START/templates/hdfs.conf.j2   |    17 -
 .../before-START/templates/health_check-v2.j2   |    91 -
 .../before-START/templates/health_check.j2      |   118 -
 .../templates/include_hosts_list.j2             |     3 -
 .../before-START/templates/log4j.properties.j2  |   218 -
 .../hooks/before-START/templates/slaves.j2      |     3 -
 .../hooks/before-START/templates/snmpd.conf.j2  |    48 -
 .../templates/taskcontroller.cfg.j2             |    20 -
 .../resources/stacks/HDP/2.0.8/metainfo.xml     |    22 -
 .../stacks/HDP/2.0.8/repos/repoinfo.xml         |    75 -
 .../stacks/HDP/2.0.8/role_command_order.json    |   107 -
 .../services/FALCON/configuration/global.xml    |    42 -
 .../FALCON/configuration/oozie-site.xml         |   145 -
 .../HDP/2.0.8/services/FALCON/metainfo.xml      |    72 -
 .../services/FALCON/package/scripts/falcon.py   |    66 -
 .../FALCON/package/scripts/falcon_client.py     |    40 -
 .../FALCON/package/scripts/falcon_server.py     |    61 -
 .../services/FALCON/package/scripts/params.py   |    36 -
 .../FALCON/package/scripts/service_check.py     |    38 -
 .../FALCON/package/scripts/status_params.py     |    24 -
 .../package/templates/client.properties.j2      |    24 -
 .../package/templates/runtime.properties.j2     |    33 -
 .../package/templates/startup.properties.j2     |    70 -
 .../HDP/2.0.8/services/GANGLIA/metainfo.xml     |   101 -
 .../GANGLIA/package/files/checkGmetad.sh        |    37 -
 .../GANGLIA/package/files/checkGmond.sh         |    62 -
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 -
 .../services/GANGLIA/package/files/gmetad.init  |    73 -
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 -
 .../services/GANGLIA/package/files/gmond.init   |    73 -
 .../services/GANGLIA/package/files/gmondLib.sh  |   545 -
 .../2.0.8/services/GANGLIA/package/files/rrd.py |   213 -
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 -
 .../GANGLIA/package/files/setupGanglia.sh       |   141 -
 .../GANGLIA/package/files/startGmetad.sh        |    64 -
 .../GANGLIA/package/files/startGmond.sh         |    80 -
 .../GANGLIA/package/files/startRrdcached.sh     |    69 -
 .../GANGLIA/package/files/stopGmetad.sh         |    43 -
 .../services/GANGLIA/package/files/stopGmond.sh |    54 -
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 -
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 -
 .../services/GANGLIA/package/scripts/ganglia.py |    97 -
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   176 -
 .../package/scripts/ganglia_monitor_service.py  |    31 -
 .../GANGLIA/package/scripts/ganglia_server.py   |   197 -
 .../package/scripts/ganglia_server_service.py   |    27 -
 .../services/GANGLIA/package/scripts/params.py  |    80 -
 .../GANGLIA/package/scripts/status_params.py    |    25 -
 .../package/templates/gangliaClusters.conf.j2   |    35 -
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    24 -
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    62 -
 .../services/HBASE/configuration/global.xml     |   160 -
 .../HBASE/configuration/hbase-policy.xml        |    53 -
 .../services/HBASE/configuration/hbase-site.xml |   356 -
 .../HDP/2.0.8/services/HBASE/metainfo.xml       |    93 -
 .../HDP/2.0.8/services/HBASE/metrics.json       | 13635 -----------------
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    32 -
 .../services/HBASE/package/scripts/__init__.py  |    19 -
 .../services/HBASE/package/scripts/functions.py |    67 -
 .../services/HBASE/package/scripts/hbase.py     |    91 -
 .../HBASE/package/scripts/hbase_client.py       |    52 -
 .../HBASE/package/scripts/hbase_master.py       |    74 -
 .../HBASE/package/scripts/hbase_regionserver.py |    75 -
 .../HBASE/package/scripts/hbase_service.py      |    46 -
 .../services/HBASE/package/scripts/params.py    |    84 -
 .../HBASE/package/scripts/service_check.py      |    89 -
 .../HBASE/package/scripts/status_params.py      |    25 -
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    62 -
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    62 -
 .../HBASE/package/templates/hbase-env.sh.j2     |    82 -
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    26 -
 .../package/templates/hbase_client_jaas.conf.j2 |     5 -
 .../templates/hbase_grant_permissions.j2        |    21 -
 .../package/templates/hbase_master_jaas.conf.j2 |     8 -
 .../templates/hbase_regionserver_jaas.conf.j2   |     8 -
 .../HBASE/package/templates/regionservers.j2    |     2 -
 .../services/HDFS/configuration/core-site.xml   |   167 -
 .../services/HDFS/configuration/global.xml      |   192 -
 .../HDFS/configuration/hadoop-policy.xml        |   134 -
 .../services/HDFS/configuration/hdfs-site.xml   |   513 -
 .../stacks/HDP/2.0.8/services/HDFS/metainfo.xml |   152 -
 .../stacks/HDP/2.0.8/services/HDFS/metrics.json |  7800 ----------
 .../HDFS/package/files/checkForFormat.sh        |    62 -
 .../services/HDFS/package/files/checkWebUI.py   |    53 -
 .../services/HDFS/package/scripts/datanode.py   |    57 -
 .../HDFS/package/scripts/hdfs_client.py         |    52 -
 .../HDFS/package/scripts/hdfs_datanode.py       |    56 -
 .../HDFS/package/scripts/hdfs_namenode.py       |   212 -
 .../HDFS/package/scripts/hdfs_snamenode.py      |    53 -
 .../HDFS/package/scripts/journalnode.py         |    74 -
 .../services/HDFS/package/scripts/namenode.py   |    68 -
 .../services/HDFS/package/scripts/params.py     |   188 -
 .../HDFS/package/scripts/service_check.py       |   107 -
 .../services/HDFS/package/scripts/snamenode.py  |    64 -
 .../HDFS/package/scripts/status_params.py       |    31 -
 .../services/HDFS/package/scripts/utils.py      |   138 -
 .../services/HDFS/package/scripts/zkfc_slave.py |    62 -
 .../package/templates/exclude_hosts_list.j2     |     3 -
 .../services/HIVE/configuration/hive-site.xml   |   267 -
 .../stacks/HDP/2.0.8/services/HIVE/metainfo.xml |   156 -
 .../services/HIVE/package/files/addMysqlUser.sh |    41 -
 .../services/HIVE/package/files/hcatSmoke.sh    |    35 -
 .../services/HIVE/package/files/hiveSmoke.sh    |    23 -
 .../services/HIVE/package/files/hiveserver2.sql |    23 -
 .../HIVE/package/files/hiveserver2Smoke.sh      |    31 -
 .../services/HIVE/package/files/pigSmoke.sh     |    18 -
 .../HIVE/package/files/startHiveserver2.sh      |    22 -
 .../HIVE/package/files/startMetastore.sh        |    22 -
 .../services/HIVE/package/scripts/__init__.py   |    19 -
 .../2.0.8/services/HIVE/package/scripts/hcat.py |    47 -
 .../HIVE/package/scripts/hcat_client.py         |    43 -
 .../HIVE/package/scripts/hcat_service_check.py  |    63 -
 .../2.0.8/services/HIVE/package/scripts/hive.py |   122 -
 .../HIVE/package/scripts/hive_client.py         |    41 -
 .../HIVE/package/scripts/hive_metastore.py      |    63 -
 .../HIVE/package/scripts/hive_server.py         |    63 -
 .../HIVE/package/scripts/hive_service.py        |    56 -
 .../HIVE/package/scripts/mysql_server.py        |    77 -
 .../HIVE/package/scripts/mysql_service.py       |    38 -
 .../services/HIVE/package/scripts/params.py     |   123 -
 .../HIVE/package/scripts/service_check.py       |    56 -
 .../HIVE/package/scripts/status_params.py       |    30 -
 .../HIVE/package/templates/hcat-env.sh.j2       |    25 -
 .../HIVE/package/templates/hive-env.sh.j2       |    55 -
 .../HDP/2.0.8/services/NAGIOS/metainfo.xml      |   105 -
 .../NAGIOS/package/files/check_aggregate.php    |   243 -
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 -
 .../package/files/check_datanode_storage.php    |   100 -
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   115 -
 .../package/files/check_hdfs_capacity.php       |   109 -
 .../files/check_hive_metastore_status.sh        |    45 -
 .../NAGIOS/package/files/check_hue_status.sh    |    31 -
 .../files/check_mapred_local_dir_used.sh        |    34 -
 .../package/files/check_name_dir_status.php     |    93 -
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    82 -
 .../package/files/check_nodemanager_health.sh   |    44 -
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 -
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 -
 .../package/files/check_templeton_status.sh     |    45 -
 .../NAGIOS/package/files/check_webui.sh         |    87 -
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 -
 .../NAGIOS/package/scripts/functions.py         |    31 -
 .../services/NAGIOS/package/scripts/nagios.py   |    97 -
 .../NAGIOS/package/scripts/nagios_server.py     |    87 -
 .../package/scripts/nagios_server_config.py     |    91 -
 .../NAGIOS/package/scripts/nagios_service.py    |    36 -
 .../services/NAGIOS/package/scripts/params.py   |   162 -
 .../NAGIOS/package/scripts/status_params.py     |    26 -
 .../NAGIOS/package/templates/contacts.cfg.j2    |    91 -
 .../package/templates/hadoop-commands.cfg.j2    |   114 -
 .../package/templates/hadoop-hostgroups.cfg.j2  |    15 -
 .../package/templates/hadoop-hosts.cfg.j2       |    16 -
 .../templates/hadoop-servicegroups.cfg.j2       |    80 -
 .../package/templates/hadoop-services.cfg.j2    |   643 -
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1349 --
 .../NAGIOS/package/templates/nagios.conf.j2     |    62 -
 .../services/NAGIOS/package/templates/nagios.j2 |   146 -
 .../NAGIOS/package/templates/resource.cfg.j2    |    33 -
 .../services/OOZIE/configuration/oozie-site.xml |   313 -
 .../HDP/2.0.8/services/OOZIE/metainfo.xml       |    84 -
 .../services/OOZIE/package/files/oozieSmoke2.sh |    95 -
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 -
 .../services/OOZIE/package/scripts/oozie.py     |   122 -
 .../OOZIE/package/scripts/oozie_client.py       |    33 -
 .../OOZIE/package/scripts/oozie_server.py       |    47 -
 .../OOZIE/package/scripts/oozie_service.py      |    65 -
 .../services/OOZIE/package/scripts/params.py    |    79 -
 .../OOZIE/package/scripts/service_check.py      |    67 -
 .../OOZIE/package/scripts/status_params.py      |    26 -
 .../OOZIE/package/templates/oozie-env.sh.j2     |    64 -
 .../package/templates/oozie-log4j.properties.j2 |    74 -
 .../services/PIG/configuration/pig.properties   |    52 -
 .../stacks/HDP/2.0.8/services/PIG/metainfo.xml  |    60 -
 .../services/PIG/package/files/pigSmoke.sh      |    18 -
 .../services/PIG/package/scripts/params.py      |    36 -
 .../2.0.8/services/PIG/package/scripts/pig.py   |    46 -
 .../services/PIG/package/scripts/pig_client.py  |    52 -
 .../PIG/package/scripts/service_check.py        |    75 -
 .../PIG/package/templates/log4j.properties.j2   |    30 -
 .../PIG/package/templates/pig-env.sh.j2         |    17 -
 .../PIG/package/templates/pig.properties.j2     |    55 -
 .../HDP/2.0.8/services/SQOOP/metainfo.xml       |    60 -
 .../services/SQOOP/package/scripts/__init__.py  |    19 -
 .../services/SQOOP/package/scripts/params.py    |    37 -
 .../SQOOP/package/scripts/service_check.py      |    37 -
 .../services/SQOOP/package/scripts/sqoop.py     |    52 -
 .../SQOOP/package/scripts/sqoop_client.py       |    41 -
 .../SQOOP/package/templates/sqoop-env.sh.j2     |    36 -
 .../services/STORM/configuration/global.xml     |    39 -
 .../services/STORM/configuration/storm-site.xml |   514 -
 .../HDP/2.0.8/services/STORM/metainfo.xml       |   104 -
 .../services/STORM/package/files/wordCount.jar  |   Bin 690588 -> 0 bytes
 .../STORM/package/scripts/drpc_server.py        |    62 -
 .../STORM/package/scripts/logviewer_server.py   |    62 -
 .../services/STORM/package/scripts/nimbus.py    |    62 -
 .../services/STORM/package/scripts/params.py    |    34 -
 .../services/STORM/package/scripts/service.py   |    62 -
 .../STORM/package/scripts/service_check.py      |    43 -
 .../STORM/package/scripts/status_params.py      |    35 -
 .../services/STORM/package/scripts/storm.py     |    39 -
 .../STORM/package/scripts/supervisor.py         |    63 -
 .../services/STORM/package/scripts/ui_server.py |    62 -
 .../STORM/package/scripts/yaml_config.py        |    49 -
 .../WEBHCAT/configuration/webhcat-site.xml      |   126 -
 .../HDP/2.0.8/services/WEBHCAT/metainfo.xml     |    65 -
 .../WEBHCAT/package/files/templetonSmoke.sh     |    95 -
 .../WEBHCAT/package/scripts/__init__.py         |    21 -
 .../services/WEBHCAT/package/scripts/params.py  |    51 -
 .../WEBHCAT/package/scripts/service_check.py    |    45 -
 .../WEBHCAT/package/scripts/status_params.py    |    26 -
 .../services/WEBHCAT/package/scripts/webhcat.py |   120 -
 .../WEBHCAT/package/scripts/webhcat_server.py   |    54 -
 .../WEBHCAT/package/scripts/webhcat_service.py  |    41 -
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |    44 -
 .../YARN/configuration/capacity-scheduler.xml   |   128 -
 .../services/YARN/configuration/core-site.xml   |    20 -
 .../services/YARN/configuration/global.xml      |    88 -
 .../YARN/configuration/mapred-queue-acls.xml    |    39 -
 .../services/YARN/configuration/mapred-site.xml |   381 -
 .../services/YARN/configuration/yarn-site.xml   |   337 -
 .../stacks/HDP/2.0.8/services/YARN/metainfo.xml |   172 -
 .../stacks/HDP/2.0.8/services/YARN/metrics.json |  2534 ---
 .../files/validateYarnComponentStatus.py        |   165 -
 .../services/YARN/package/scripts/__init__.py   |    21 -
 .../YARN/package/scripts/historyserver.py       |    60 -
 .../package/scripts/mapred_service_check.py     |    74 -
 .../YARN/package/scripts/mapreduce2_client.py   |    43 -
 .../YARN/package/scripts/nodemanager.py         |    61 -
 .../services/YARN/package/scripts/params.py     |    89 -
 .../YARN/package/scripts/resourcemanager.py     |   112 -
 .../services/YARN/package/scripts/service.py    |    65 -
 .../YARN/package/scripts/service_check.py       |    67 -
 .../YARN/package/scripts/status_params.py       |    34 -
 .../2.0.8/services/YARN/package/scripts/yarn.py |   126 -
 .../YARN/package/scripts/yarn_client.py         |    43 -
 .../package/templates/container-executor.cfg.j2 |    22 -
 .../package/templates/exclude_hosts_list.j2     |     3 -
 .../YARN/package/templates/mapreduce.conf.j2    |    17 -
 .../YARN/package/templates/yarn-env.sh.j2       |   119 -
 .../YARN/package/templates/yarn.conf.j2         |    17 -
 .../services/ZOOKEEPER/configuration/global.xml |    75 -
 .../HDP/2.0.8/services/ZOOKEEPER/metainfo.xml   |    70 -
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 -
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 -
 .../ZOOKEEPER/package/files/zkService.sh        |    26 -
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 -
 .../ZOOKEEPER/package/scripts/__init__.py       |    21 -
 .../ZOOKEEPER/package/scripts/params.py         |    71 -
 .../ZOOKEEPER/package/scripts/service_check.py  |    47 -
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 -
 .../ZOOKEEPER/package/scripts/zookeeper.py      |    92 -
 .../package/scripts/zookeeper_client.py         |    43 -
 .../package/scripts/zookeeper_server.py         |    55 -
 .../package/scripts/zookeeper_service.py        |    43 -
 .../package/templates/configuration.xsl.j2      |    24 -
 .../package/templates/log4j.properties.j2       |    71 -
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    51 -
 .../package/templates/zookeeper-env.sh.j2       |    25 -
 .../templates/zookeeper_client_jaas.conf.j2     |     5 -
 .../package/templates/zookeeper_jaas.conf.j2    |     8 -
 .../before-INSTALL/files/changeToSecureUid.sh   |    50 +
 .../2.1.1/hooks/before-INSTALL/scripts/hook.py  |    36 +
 .../hooks/before-INSTALL/scripts/params.py      |    84 +
 .../scripts/shared_initialization.py            |   113 +
 .../hooks/before-START/files/checkForFormat.sh  |    62 +
 .../before-START/files/task-log4j.properties    |   132 +
 .../2.1.1/hooks/before-START/scripts/hook.py    |    37 +
 .../2.1.1/hooks/before-START/scripts/params.py  |   172 +
 .../scripts/shared_initialization.py            |   327 +
 .../templates/commons-logging.properties.j2     |    25 +
 .../templates/exclude_hosts_list.j2             |     3 +
 .../before-START/templates/hadoop-env.sh.j2     |   121 +
 .../templates/hadoop-metrics2.properties.j2     |    45 +
 .../hooks/before-START/templates/hdfs.conf.j2   |    17 +
 .../before-START/templates/health_check-v2.j2   |    91 +
 .../before-START/templates/health_check.j2      |   118 +
 .../templates/include_hosts_list.j2             |     3 +
 .../before-START/templates/log4j.properties.j2  |   218 +
 .../hooks/before-START/templates/slaves.j2      |     3 +
 .../hooks/before-START/templates/snmpd.conf.j2  |    48 +
 .../templates/taskcontroller.cfg.j2             |    20 +
 .../resources/stacks/HDP/2.1.1/metainfo.xml     |    22 +
 .../stacks/HDP/2.1.1/repos/repoinfo.xml         |    75 +
 .../stacks/HDP/2.1.1/role_command_order.json    |   107 +
 .../services/FALCON/configuration/global.xml    |    42 +
 .../FALCON/configuration/oozie-site.xml         |   145 +
 .../HDP/2.1.1/services/FALCON/metainfo.xml      |    72 +
 .../services/FALCON/package/scripts/falcon.py   |    66 +
 .../FALCON/package/scripts/falcon_client.py     |    40 +
 .../FALCON/package/scripts/falcon_server.py     |    61 +
 .../services/FALCON/package/scripts/params.py   |    36 +
 .../FALCON/package/scripts/service_check.py     |    38 +
 .../FALCON/package/scripts/status_params.py     |    24 +
 .../package/templates/client.properties.j2      |    24 +
 .../package/templates/runtime.properties.j2     |    33 +
 .../package/templates/startup.properties.j2     |    70 +
 .../HDP/2.1.1/services/GANGLIA/metainfo.xml     |   101 +
 .../GANGLIA/package/files/checkGmetad.sh        |    37 +
 .../GANGLIA/package/files/checkGmond.sh         |    62 +
 .../GANGLIA/package/files/checkRrdcached.sh     |    34 +
 .../services/GANGLIA/package/files/gmetad.init  |    73 +
 .../services/GANGLIA/package/files/gmetadLib.sh |   204 +
 .../services/GANGLIA/package/files/gmond.init   |    73 +
 .../services/GANGLIA/package/files/gmondLib.sh  |   545 +
 .../2.1.1/services/GANGLIA/package/files/rrd.py |   213 +
 .../GANGLIA/package/files/rrdcachedLib.sh       |    47 +
 .../GANGLIA/package/files/setupGanglia.sh       |   141 +
 .../GANGLIA/package/files/startGmetad.sh        |    64 +
 .../GANGLIA/package/files/startGmond.sh         |    80 +
 .../GANGLIA/package/files/startRrdcached.sh     |    69 +
 .../GANGLIA/package/files/stopGmetad.sh         |    43 +
 .../services/GANGLIA/package/files/stopGmond.sh |    54 +
 .../GANGLIA/package/files/stopRrdcached.sh      |    41 +
 .../GANGLIA/package/files/teardownGanglia.sh    |    28 +
 .../services/GANGLIA/package/scripts/ganglia.py |    97 +
 .../GANGLIA/package/scripts/ganglia_monitor.py  |   176 +
 .../package/scripts/ganglia_monitor_service.py  |    31 +
 .../GANGLIA/package/scripts/ganglia_server.py   |   197 +
 .../package/scripts/ganglia_server_service.py   |    27 +
 .../services/GANGLIA/package/scripts/params.py  |    80 +
 .../GANGLIA/package/scripts/status_params.py    |    25 +
 .../package/templates/gangliaClusters.conf.j2   |    35 +
 .../GANGLIA/package/templates/gangliaEnv.sh.j2  |    24 +
 .../GANGLIA/package/templates/gangliaLib.sh.j2  |    62 +
 .../services/HBASE/configuration/global.xml     |   160 +
 .../HBASE/configuration/hbase-policy.xml        |    53 +
 .../services/HBASE/configuration/hbase-site.xml |   356 +
 .../HDP/2.1.1/services/HBASE/metainfo.xml       |    93 +
 .../HDP/2.1.1/services/HBASE/metrics.json       | 13635 +++++++++++++++++
 .../HBASE/package/files/hbaseSmokeVerify.sh     |    32 +
 .../services/HBASE/package/scripts/__init__.py  |    19 +
 .../services/HBASE/package/scripts/functions.py |    67 +
 .../services/HBASE/package/scripts/hbase.py     |    91 +
 .../HBASE/package/scripts/hbase_client.py       |    52 +
 .../HBASE/package/scripts/hbase_master.py       |    74 +
 .../HBASE/package/scripts/hbase_regionserver.py |    75 +
 .../HBASE/package/scripts/hbase_service.py      |    46 +
 .../services/HBASE/package/scripts/params.py    |    84 +
 .../HBASE/package/scripts/service_check.py      |    89 +
 .../HBASE/package/scripts/status_params.py      |    25 +
 ...-metrics2-hbase.properties-GANGLIA-MASTER.j2 |    62 +
 ...doop-metrics2-hbase.properties-GANGLIA-RS.j2 |    62 +
 .../HBASE/package/templates/hbase-env.sh.j2     |    82 +
 .../HBASE/package/templates/hbase-smoke.sh.j2   |    26 +
 .../package/templates/hbase_client_jaas.conf.j2 |     5 +
 .../templates/hbase_grant_permissions.j2        |    21 +
 .../package/templates/hbase_master_jaas.conf.j2 |     8 +
 .../templates/hbase_regionserver_jaas.conf.j2   |     8 +
 .../HBASE/package/templates/regionservers.j2    |     2 +
 .../services/HDFS/configuration/core-site.xml   |   167 +
 .../services/HDFS/configuration/global.xml      |   192 +
 .../HDFS/configuration/hadoop-policy.xml        |   134 +
 .../services/HDFS/configuration/hdfs-site.xml   |   513 +
 .../stacks/HDP/2.1.1/services/HDFS/metainfo.xml |   152 +
 .../stacks/HDP/2.1.1/services/HDFS/metrics.json |  7800 ++++++++++
 .../HDFS/package/files/checkForFormat.sh        |    62 +
 .../services/HDFS/package/files/checkWebUI.py   |    53 +
 .../services/HDFS/package/scripts/datanode.py   |    57 +
 .../HDFS/package/scripts/hdfs_client.py         |    52 +
 .../HDFS/package/scripts/hdfs_datanode.py       |    56 +
 .../HDFS/package/scripts/hdfs_namenode.py       |   212 +
 .../HDFS/package/scripts/hdfs_snamenode.py      |    53 +
 .../HDFS/package/scripts/journalnode.py         |    74 +
 .../services/HDFS/package/scripts/namenode.py   |    68 +
 .../services/HDFS/package/scripts/params.py     |   188 +
 .../HDFS/package/scripts/service_check.py       |   107 +
 .../services/HDFS/package/scripts/snamenode.py  |    64 +
 .../HDFS/package/scripts/status_params.py       |    31 +
 .../services/HDFS/package/scripts/utils.py      |   138 +
 .../services/HDFS/package/scripts/zkfc_slave.py |    62 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../services/HIVE/configuration/hive-site.xml   |   267 +
 .../stacks/HDP/2.1.1/services/HIVE/metainfo.xml |   156 +
 .../services/HIVE/package/files/addMysqlUser.sh |    41 +
 .../services/HIVE/package/files/hcatSmoke.sh    |    35 +
 .../services/HIVE/package/files/hiveSmoke.sh    |    23 +
 .../services/HIVE/package/files/hiveserver2.sql |    23 +
 .../HIVE/package/files/hiveserver2Smoke.sh      |    31 +
 .../services/HIVE/package/files/pigSmoke.sh     |    18 +
 .../HIVE/package/files/startHiveserver2.sh      |    22 +
 .../HIVE/package/files/startMetastore.sh        |    22 +
 .../services/HIVE/package/scripts/__init__.py   |    19 +
 .../2.1.1/services/HIVE/package/scripts/hcat.py |    47 +
 .../HIVE/package/scripts/hcat_client.py         |    43 +
 .../HIVE/package/scripts/hcat_service_check.py  |    63 +
 .../2.1.1/services/HIVE/package/scripts/hive.py |   122 +
 .../HIVE/package/scripts/hive_client.py         |    41 +
 .../HIVE/package/scripts/hive_metastore.py      |    63 +
 .../HIVE/package/scripts/hive_server.py         |    63 +
 .../HIVE/package/scripts/hive_service.py        |    56 +
 .../HIVE/package/scripts/mysql_server.py        |    77 +
 .../HIVE/package/scripts/mysql_service.py       |    38 +
 .../services/HIVE/package/scripts/params.py     |   123 +
 .../HIVE/package/scripts/service_check.py       |    56 +
 .../HIVE/package/scripts/status_params.py       |    30 +
 .../HIVE/package/templates/hcat-env.sh.j2       |    25 +
 .../HIVE/package/templates/hive-env.sh.j2       |    55 +
 .../HDP/2.1.1/services/NAGIOS/metainfo.xml      |   105 +
 .../NAGIOS/package/files/check_aggregate.php    |   243 +
 .../services/NAGIOS/package/files/check_cpu.pl  |   114 +
 .../package/files/check_datanode_storage.php    |   100 +
 .../NAGIOS/package/files/check_hdfs_blocks.php  |   115 +
 .../package/files/check_hdfs_capacity.php       |   109 +
 .../files/check_hive_metastore_status.sh        |    45 +
 .../NAGIOS/package/files/check_hue_status.sh    |    31 +
 .../files/check_mapred_local_dir_used.sh        |    34 +
 .../package/files/check_name_dir_status.php     |    93 +
 .../NAGIOS/package/files/check_namenodes_ha.sh  |    82 +
 .../package/files/check_nodemanager_health.sh   |    44 +
 .../NAGIOS/package/files/check_oozie_status.sh  |    45 +
 .../NAGIOS/package/files/check_rpcq_latency.php |   104 +
 .../package/files/check_templeton_status.sh     |    45 +
 .../NAGIOS/package/files/check_webui.sh         |    87 +
 .../NAGIOS/package/files/hdp_nagios_init.php    |    81 +
 .../NAGIOS/package/scripts/functions.py         |    31 +
 .../services/NAGIOS/package/scripts/nagios.py   |    97 +
 .../NAGIOS/package/scripts/nagios_server.py     |    87 +
 .../package/scripts/nagios_server_config.py     |    91 +
 .../NAGIOS/package/scripts/nagios_service.py    |    36 +
 .../services/NAGIOS/package/scripts/params.py   |   162 +
 .../NAGIOS/package/scripts/status_params.py     |    26 +
 .../NAGIOS/package/templates/contacts.cfg.j2    |    91 +
 .../package/templates/hadoop-commands.cfg.j2    |   114 +
 .../package/templates/hadoop-hostgroups.cfg.j2  |    15 +
 .../package/templates/hadoop-hosts.cfg.j2       |    16 +
 .../templates/hadoop-servicegroups.cfg.j2       |    80 +
 .../package/templates/hadoop-services.cfg.j2    |   643 +
 .../NAGIOS/package/templates/nagios.cfg.j2      |  1349 ++
 .../NAGIOS/package/templates/nagios.conf.j2     |    62 +
 .../services/NAGIOS/package/templates/nagios.j2 |   146 +
 .../NAGIOS/package/templates/resource.cfg.j2    |    33 +
 .../services/OOZIE/configuration/oozie-site.xml |   313 +
 .../HDP/2.1.1/services/OOZIE/metainfo.xml       |    84 +
 .../services/OOZIE/package/files/oozieSmoke2.sh |    95 +
 .../OOZIE/package/files/wrap_ooziedb.sh         |    31 +
 .../services/OOZIE/package/scripts/oozie.py     |   122 +
 .../OOZIE/package/scripts/oozie_client.py       |    33 +
 .../OOZIE/package/scripts/oozie_server.py       |    47 +
 .../OOZIE/package/scripts/oozie_service.py      |    65 +
 .../services/OOZIE/package/scripts/params.py    |    79 +
 .../OOZIE/package/scripts/service_check.py      |    67 +
 .../OOZIE/package/scripts/status_params.py      |    26 +
 .../OOZIE/package/templates/oozie-env.sh.j2     |    64 +
 .../package/templates/oozie-log4j.properties.j2 |    74 +
 .../services/PIG/configuration/pig.properties   |    52 +
 .../stacks/HDP/2.1.1/services/PIG/metainfo.xml  |    60 +
 .../services/PIG/package/files/pigSmoke.sh      |    18 +
 .../services/PIG/package/scripts/params.py      |    36 +
 .../2.1.1/services/PIG/package/scripts/pig.py   |    46 +
 .../services/PIG/package/scripts/pig_client.py  |    52 +
 .../PIG/package/scripts/service_check.py        |    75 +
 .../PIG/package/templates/log4j.properties.j2   |    30 +
 .../PIG/package/templates/pig-env.sh.j2         |    17 +
 .../PIG/package/templates/pig.properties.j2     |    55 +
 .../HDP/2.1.1/services/SQOOP/metainfo.xml       |    60 +
 .../services/SQOOP/package/scripts/__init__.py  |    19 +
 .../services/SQOOP/package/scripts/params.py    |    37 +
 .../SQOOP/package/scripts/service_check.py      |    37 +
 .../services/SQOOP/package/scripts/sqoop.py     |    52 +
 .../SQOOP/package/scripts/sqoop_client.py       |    41 +
 .../SQOOP/package/templates/sqoop-env.sh.j2     |    36 +
 .../services/STORM/configuration/global.xml     |    39 +
 .../services/STORM/configuration/storm-site.xml |   514 +
 .../HDP/2.1.1/services/STORM/metainfo.xml       |   104 +
 .../services/STORM/package/files/wordCount.jar  |   Bin 0 -> 690588 bytes
 .../STORM/package/scripts/drpc_server.py        |    62 +
 .../STORM/package/scripts/logviewer_server.py   |    62 +
 .../services/STORM/package/scripts/nimbus.py    |    62 +
 .../services/STORM/package/scripts/params.py    |    34 +
 .../services/STORM/package/scripts/service.py   |    62 +
 .../STORM/package/scripts/service_check.py      |    43 +
 .../STORM/package/scripts/status_params.py      |    35 +
 .../services/STORM/package/scripts/storm.py     |    39 +
 .../STORM/package/scripts/supervisor.py         |    63 +
 .../services/STORM/package/scripts/ui_server.py |    62 +
 .../STORM/package/scripts/yaml_config.py        |    49 +
 .../WEBHCAT/configuration/webhcat-site.xml      |   126 +
 .../HDP/2.1.1/services/WEBHCAT/metainfo.xml     |    65 +
 .../WEBHCAT/package/files/templetonSmoke.sh     |    95 +
 .../WEBHCAT/package/scripts/__init__.py         |    21 +
 .../services/WEBHCAT/package/scripts/params.py  |    51 +
 .../WEBHCAT/package/scripts/service_check.py    |    45 +
 .../WEBHCAT/package/scripts/status_params.py    |    26 +
 .../services/WEBHCAT/package/scripts/webhcat.py |   120 +
 .../WEBHCAT/package/scripts/webhcat_server.py   |    54 +
 .../WEBHCAT/package/scripts/webhcat_service.py  |    41 +
 .../WEBHCAT/package/templates/webhcat-env.sh.j2 |    44 +
 .../YARN/configuration/capacity-scheduler.xml   |   128 +
 .../services/YARN/configuration/core-site.xml   |    20 +
 .../services/YARN/configuration/global.xml      |    88 +
 .../YARN/configuration/mapred-queue-acls.xml    |    39 +
 .../services/YARN/configuration/mapred-site.xml |   381 +
 .../services/YARN/configuration/yarn-site.xml   |   337 +
 .../stacks/HDP/2.1.1/services/YARN/metainfo.xml |   172 +
 .../stacks/HDP/2.1.1/services/YARN/metrics.json |  2534 +++
 .../files/validateYarnComponentStatus.py        |   165 +
 .../services/YARN/package/scripts/__init__.py   |    21 +
 .../YARN/package/scripts/historyserver.py       |    60 +
 .../package/scripts/mapred_service_check.py     |    74 +
 .../YARN/package/scripts/mapreduce2_client.py   |    43 +
 .../YARN/package/scripts/nodemanager.py         |    61 +
 .../services/YARN/package/scripts/params.py     |    89 +
 .../YARN/package/scripts/resourcemanager.py     |   112 +
 .../services/YARN/package/scripts/service.py    |    65 +
 .../YARN/package/scripts/service_check.py       |    67 +
 .../YARN/package/scripts/status_params.py       |    34 +
 .../2.1.1/services/YARN/package/scripts/yarn.py |   126 +
 .../YARN/package/scripts/yarn_client.py         |    43 +
 .../package/templates/container-executor.cfg.j2 |    22 +
 .../package/templates/exclude_hosts_list.j2     |     3 +
 .../YARN/package/templates/mapreduce.conf.j2    |    17 +
 .../YARN/package/templates/yarn-env.sh.j2       |   119 +
 .../YARN/package/templates/yarn.conf.j2         |    17 +
 .../services/ZOOKEEPER/configuration/global.xml |    75 +
 .../HDP/2.1.1/services/ZOOKEEPER/metainfo.xml   |    70 +
 .../services/ZOOKEEPER/package/files/zkEnv.sh   |    96 +
 .../ZOOKEEPER/package/files/zkServer.sh         |   120 +
 .../ZOOKEEPER/package/files/zkService.sh        |    26 +
 .../services/ZOOKEEPER/package/files/zkSmoke.sh |    78 +
 .../ZOOKEEPER/package/scripts/__init__.py       |    21 +
 .../ZOOKEEPER/package/scripts/params.py         |    71 +
 .../ZOOKEEPER/package/scripts/service_check.py  |    47 +
 .../ZOOKEEPER/package/scripts/status_params.py  |    26 +
 .../ZOOKEEPER/package/scripts/zookeeper.py      |    92 +
 .../package/scripts/zookeeper_client.py         |    43 +
 .../package/scripts/zookeeper_server.py         |    55 +
 .../package/scripts/zookeeper_service.py        |    43 +
 .../package/templates/configuration.xsl.j2      |    24 +
 .../package/templates/log4j.properties.j2       |    71 +
 .../ZOOKEEPER/package/templates/zoo.cfg.j2      |    51 +
 .../package/templates/zookeeper-env.sh.j2       |    25 +
 .../templates/zookeeper_client_jaas.conf.j2     |     5 +
 .../package/templates/zookeeper_jaas.conf.j2    |     8 +
 751 files changed, 61772 insertions(+), 46458 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/pom.xml
----------------------------------------------------------------------
diff --git a/ambari-server/pom.xml b/ambari-server/pom.xml
index 0f4a291..17d78d0 100644
--- a/ambari-server/pom.xml
+++ b/ambari-server/pom.xml
@@ -29,7 +29,7 @@
   <properties>
     <project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
     <python.ver>python &gt;= 2.6</python.ver>
-    <hdpUrlForCentos6>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</hdpUrlForCentos6>
+    <hdpUrlForCentos6>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.1.1.0</hdpUrlForCentos6>
   </properties>
   <build>
     <plugins>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/set-hdp-repo-url.sh
----------------------------------------------------------------------
diff --git a/ambari-server/set-hdp-repo-url.sh b/ambari-server/set-hdp-repo-url.sh
index 99173b3..62a8ec1 100644
--- a/ambari-server/set-hdp-repo-url.sh
+++ b/ambari-server/set-hdp-repo-url.sh
@@ -26,7 +26,7 @@ then
   #  Modify the VERSION variable in this file to match the new version
   #  Modify the previous version to store concrete public repo url
 
-  VERSION=2.0.6
+  VERSION=2.1.1
   C6URL="$1"
   C5URL="${C6URL/centos6/centos5}"
   S11URL="${C6URL/centos6/suse11}"
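
The ${C6URL/centos6/...} expansions above rewrite the first "centos6" in the incoming URL to derive the CentOS 5 and SUSE 11 repo URLs. A rough Python equivalent, using the hdpUrlForCentos6 value from the pom.xml change as a sample input:

    # Sample input: the hdpUrlForCentos6 property set in ambari-server/pom.xml above.
    c6_url = "http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.1.1.0"

    # Mirrors the bash ${C6URL/centos6/centos5} and ${C6URL/centos6/suse11}
    # substitutions, which replace only the first match.
    c5_url = c6_url.replace("centos6", "centos5", 1)
    s11_url = c6_url.replace("centos6", "suse11", 1)

    print(c5_url)   # http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.1.1.0
    print(s11_url)  # http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.1.1.0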

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh
new file mode 100644
index 0000000..4872a10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/files/changeToSecureUid.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   grep -q $i /etc/passwd
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ -z "$newUid" ]
+then
+  echo "Failed to find an available uid between 1001 and 2000"
+  exit 1
+fi
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
+exit 0
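
Note that find_available_uid() above greps /etc/passwd for the bare number, so
any field containing those digits counts as "taken". A stricter sketch of the
same idea using Python's pwd database (illustration only, not what the hook
ships):

    import pwd

    def find_available_uid(low=1001, high=2000):
        """Return the first uid in [low, high] not present in the passwd database."""
        used = set(entry.pw_uid for entry in pwd.getpwall())
        for uid in range(low, high + 1):
            if uid not in used:
                return uid
        return None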

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..51e5cd2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_users()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000..fa19ca3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,81 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+zk_user = config['configurations']['global']['zk_user']
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group =  "users"
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(hagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file
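
The has_*/is_* flags above are simple presence checks against the command's
clusterHostInfo section. A self-contained sketch of the pattern (the cluster
layout below is invented for illustration):

    cluster_host_info = {
        "namenode_host": ["c6401.ambari.apache.org"],
        "slave_hosts": ["c6402.ambari.apache.org", "c6403.ambari.apache.org"],
    }
    hostname = "c6402.ambari.apache.org"

    namenode_host = cluster_host_info.get("namenode_host", [])
    slave_hosts = cluster_host_info.get("slave_hosts", [])

    has_slaves = len(slave_hosts) > 0
    is_namenode_master = hostname in namenode_host
    is_slave = hostname in slave_hosts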

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..26a7592
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,107 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  Group(params.user_group)
+  Group(params.smoke_user_group)
+  Group(params.proxyuser_group)
+  User(params.smoke_user,
+       gid=params.user_group,
+       groups=[params.proxyuser_group]
+  )
+  smoke_user_dirs = format(
+    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+  set_uid(params.smoke_user, smoke_user_dirs)
+
+  if params.has_hbase_masters:
+    User(params.hbase_user,
+         gid = params.user_group,
+         groups=[params.user_group])
+    hbase_user_dirs = format(
+      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+    set_uid(params.hbase_user, hbase_user_dirs)
+
+  if params.has_nagios:
+    Group(params.nagios_group)
+    User(params.nagios_user,
+         gid=params.nagios_group)
+
+  if params.has_oozie_server:
+    User(params.oozie_user,
+         gid = params.user_group)
+
+  if params.has_hcat_server_host:
+    User(params.webhcat_user,
+         gid = params.user_group)
+    User(params.hcat_user,
+         gid = params.user_group)
+
+  if params.has_hive_server_host:
+    User(params.hive_user,
+         gid = params.user_group)
+
+  if params.has_resourcemanager:
+    User(params.yarn_user,
+         gid = params.user_group)
+
+  if params.has_ganglia_server:
+    Group(params.gmetad_user)
+    Group(params.gmond_user)
+    User(params.gmond_user,
+         gid=params.user_group,
+        groups=[params.gmond_user])
+    User(params.gmetad_user,
+         gid=params.user_group,
+        groups=[params.gmetad_user])
+
+  User(params.hdfs_user,
+        gid=params.user_group,
+        groups=[params.user_group]
+  )
+  User(params.mapred_user,
+       gid=params.user_group,
+       groups=[params.user_group]
+  )
+  if params.has_zk_host:
+    User(params.zk_user,
+         gid=params.user_group)
+
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  File("/tmp/changeUid.sh",
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
+          not_if = format("test $(id -u {user}) -gt 1000"))
+
+def install_packages():
+  Package("unzip")
+  Package("net-snmp")
+  Package("net-snmp-utils")
\ No newline at end of file
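
The changeUid.sh call in set_uid() is guarded by
not_if = "test $(id -u {user}) -gt 1000", i.e. it only runs while the user's
current uid is still in the low, system-assigned range. A hedged sketch of that
check with the standard pwd module:

    import pwd

    def needs_secure_uid(username, threshold=1000):
        """True when the user's current uid is at or below the threshold,
        mirroring the `test $(id -u user) -gt 1000` guard above."""
        return pwd.getpwnam(username).pw_uid <= threshold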

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
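
The guard above refuses to run "namenode -format" if any configured name
directory already contains files. A minimal sketch of the same decision in
Python (the path below is a placeholder):

    import os

    def dirs_blocking_format(name_dirs):
        """Return the name directories that are already non-empty."""
        return [d for d in name_dirs if os.path.isdir(d) and os.listdir(d)]

    # Format only when nothing is returned, e.g.:
    # if not dirs_blocking_format(["/hadoop/hdfs/namenode"]): run_format()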

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..c8939fc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,132 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..e11bfac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/hook.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_java()
+    setup_hadoop()
+    setup_configs()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..aabb406
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/params.py
@@ -0,0 +1,172 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#java params
+artifact_dir = "/tmp/HDP-artifacts/"
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#users and groups
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+yarn_user = config['configurations']['global']['yarn_user']
+
+user_group = config['configurations']['global']['user_group']
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#snmp
+snmp_conf_dir = "/etc/snmp/"
+snmp_source = "0.0.0.0/0"
+snmp_community = "hadoop"
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(hagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+#hadoop params
+hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+hadoop_lib_home = "/usr/lib/hadoop/lib"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_home = "/usr"
+hadoop_bin = "/usr/lib/hadoop/bin"
+
+task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+limits_conf_dir = "/etc/security/limits.d"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url']
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver']
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username']
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password']
+
+rca_enabled = config['configurations']['global']['rca_enabled']
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+if System.get_instance().platform == "suse":
+  jsvc_path = "/usr/lib/bigtop-utils"
+else:
+  jsvc_path = "/usr/libexec/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['global']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("jtnode_heapsize","1024m")
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+#hdfs ha properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+namenode_id = None
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids.split(","):
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
\ No newline at end of file
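
For readers unfamiliar with the HA block above: the local NameNode id is found
by matching this host against each dfs.namenode.rpc-address.<nameservice>.<id>
value from hdfs-site. A self-contained sketch with invented values:

    hdfs_site = {
        "dfs.nameservices": "mycluster",
        "dfs.ha.namenodes.mycluster": "nn1,nn2",
        "dfs.namenode.rpc-address.mycluster.nn1": "c6401.ambari.apache.org:8020",
        "dfs.namenode.rpc-address.mycluster.nn2": "c6402.ambari.apache.org:8020",
    }
    hostname = "c6402.ambari.apache.org"

    nameservice = hdfs_site["dfs.nameservices"]
    namenode_id = None
    for nn_id in hdfs_site["dfs.ha.namenodes." + nameservice].split(","):
        rpc_address = hdfs_site["dfs.namenode.rpc-address.%s.%s" % (nameservice, nn_id)]
        if hostname in rpc_address:
            namenode_id = nn_id  # "nn2" for this host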

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..7b406e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,322 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_java():
+  """
+  Installs the JDK using specific params that come from ambari-server
+  """
+  import params
+
+  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
+  java_dir = os.path.dirname(params.java_home)
+  java_exec = format("{java_home}/bin/java")
+  
+  if not params.jdk_name:
+    return
+  
+  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}"))
+
+  if params.jdk_name.endswith(".bin"):
+    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
+  elif params.jdk_name.endswith(".gz"):
+    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+  
+  Execute(install_cmd,
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}")
+  )
+  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
+  Execute( download_jce,
+        path = ["/bin","/usr/bin/"],
+        not_if =format("test -e {jce_curl_target}"),
+        ignore_failures = True
+  )
+  
+  if params.security_enabled:
+    security_dir = format("{java_home}/jre/lib/security")
+    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
+    Execute(extract_cmd,
+          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+          cwd  = security_dir,
+          path = ['/bin/','/usr/bin']
+    )
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
+       content=Template("snmpd.conf.j2"))
+  Service("snmpd",
+          action = "restart")
+
+  Execute("/bin/echo 0 > /selinux/enforce",
+          only_if="test -f /selinux/enforce"
+  )
+
+  install_snappy()
+
+  #directories
+  Directory(params.hadoop_conf_dir,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hdfs_log_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hadoop_pid_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+
+  #files
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  if tc_mode:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
+    File(os.path.join(params.hadoop_conf_dir, file),
+         owner=tc_owner,
+         content=Template(file + ".j2")
+    )
+
+  health_check_template = "health_check" #for stack 1 use 'health_check'
+  File(os.path.join(params.hadoop_conf_dir, "health_check"),
+       owner=tc_owner,
+       content=Template(health_check_template + ".j2")
+  )
+
+  File(os.path.join(params.hadoop_conf_dir, "log4j.properties"),
+       owner=params.hdfs_user,
+       content=Template("log4j.properties.j2")
+  )
+
+  update_log4j_props(os.path.join(params.hadoop_conf_dir, "log4j.properties"))
+
+  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+       owner=params.hdfs_user,
+       content=Template("hadoop-metrics2.properties.j2")
+  )
+
+  db_driver_dload_cmd = ""
+  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+
+  if db_driver_dload_cmd:
+    Execute(db_driver_dload_cmd,
+            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
+    )
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services
+  """
+  import params
+
+  if "mapred-queue-acls" in params.config['configurations']:
+    XmlConfig("mapred-queue-acls.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'mapred-queue-acls'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+  elif os.path.exists(
+      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
+    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  File(params.task_log4j_properties_location,
+       content=StaticFile("task-log4j.properties"),
+       mode=0755
+  )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  # if params.stack_version[0] == "1":
+  Link('/usr/lib/hadoop/lib/hadoop-tools.jar',
+       to = '/usr/lib/hadoop/hadoop-tools.jar'
+  )
+
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
+    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+    File(os.path.join(params.hadoop_conf_dir, 'masters'),
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  # generate_include_file()
+
+def update_log4j_props(file):
+  import params
+
+  property_map = {
+    'ambari.jobhistory.database': params.ambari_db_rca_url,
+    'ambari.jobhistory.driver': params.ambari_db_rca_driver,
+    'ambari.jobhistory.user': params.ambari_db_rca_username,
+    'ambari.jobhistory.password': params.ambari_db_rca_password,
+    'ambari.jobhistory.logger': 'DEBUG,JHA',
+
+    'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
+    'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
+    'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
+    'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
+    'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
+
+    'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
+    'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
+  }
+  for key in property_map:
+    value = property_map[key]
+    Execute(format(
+      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
+
+
+def generate_include_file():
+  import params
+
+  if params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+
+def install_snappy():
+  import params
+
+  snappy_so = "libsnappy.so"
+  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
+  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
+  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
+  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
+  so_src_dir_x86 = format("{hadoop_home}/lib")
+  so_src_dir_x64 = format("{hadoop_home}/lib64")
+  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
+  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
+  Execute(
+    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
+  Execute(
+    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))


[06/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2
new file mode 100644
index 0000000..acb2522
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.cfg.j2
@@ -0,0 +1,1349 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
+#
+# Read the documentation for more information on this configuration
+# file.  I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+# Last Modified: 12-14-2008
+#
+##############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes.  This should be the first option specified 
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/contacts.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# Definitions for monitoring the local (Linux) host
+#cfg_file=/etc/nagios/objects/localhost.cfg
+
+# Definitions for monitoring a Windows machine
+#cfg_file=/etc/nagios/objects/windows.cfg
+
+# Definitions for monitoring a router/switch
+#cfg_file=/etc/nagios/objects/switch.cfg
+
+# Definitions for monitoring a network printer
+#cfg_file=/etc/nagios/objects/printer.cfg
+
+# Definitions for hadoop servers
+cfg_file={{nagios_host_cfg}}
+cfg_file={{nagios_hostgroup_cfg}}
+cfg_file={{nagios_servicegroup_cfg}}
+cfg_file={{nagios_service_cfg}}
+cfg_file={{nagios_command_cfg}}
+
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts.  The CGIs read object definitions from 
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file.  You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start 
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions.  The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file={{nagios_resource_cfg}}
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored.  Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+#  restarts.
+
+status_file=/var/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and 
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.  
+# You can either supply a username or a UID.
+
+nagios_user={{nagios_user}}
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.  
+# You can either supply a group name or a GID.
+
+nagios_group={{nagios_group}}
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).  By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side.  If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later.  If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute.  If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly 
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody').  Permissions should be set at the 
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/nagios/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming 
+# external commands before they are processed.  As external commands 
+# are processed by the daemon, they are removed from the buffer.  
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file={{nagios_pid_file}}
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc.  This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is the path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values:  0      = Broker nothing
+#         -1      = Broker everything
+#         <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# be loaded by Nagios at startup.  Use multiple directives if you want
+# to load more than one module.  Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+#    1. Shutdown Nagios, replace the module file, restart Nagios
+#    2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+#   broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+#	n	= None - don't rotate the log
+#	h	= Hourly rotation (top of the hour)
+#	d	= Daily rotation (midnight every day)
+#	w	= Weekly rotation (midnight on Saturday evening)
+#	m	= Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be 
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1.  If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0.  If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0.  If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0.  If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1.  If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option.  In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0.  If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0.  If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!  This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed.  Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts.  Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks.  Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+#       s       = Use "smart" interleave factor calculation
+#       x       = Use an interleave factor of x, where x is a
+#                 number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed.  Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of 
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized.  A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that  a single
+# check result reaper event will be allowed to run before 
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is the directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!  
+
+check_result_path=/var/nagios/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) which check
+# result files are considered to be valid.  Files older than this 
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks.  Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state 
+# information when checking host and service dependencies. Normally 
+# Nagios will only use the latest hard host or service state when 
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+#  0 = Don't use soft state dependencies (default) 
+#  1 = Use soft state dependencies 
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time.  This can help balance the load on
+# the monitoring server.  
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks.  This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled.  Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off.  Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands.  All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down.  Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor.  This is useful for 
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts.  Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down.  The state 
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular intervals, but it will still save retention
+# data before shutting down or restarting.  If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set 
+# program status variables based on the values saved in the
+# retention file.  If you want to use retained program status
+# information, set this value to 1.  If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file.
+# If you want to use retained scheduling info, set this
+# value to 1.  If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.  
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
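+# As an illustration of that arithmetic (not a value used by this template):
+#retained_host_attribute_mask=24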
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options.  For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options.  For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files.  Setting this to 60 means
+# that each interval is one minute long (60 seconds).  Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available.  It is recommended that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios.  Nagios is critical to you - make sure you keep it in
+# good shape.  Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance 
+# with our privacy policy - see http://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates.  By default, Nagios will send information on the 
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not.  Nagios Enterprises uses
+# this data to determine the number of users running specific versions of
+# Nagios.  Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default).  Otherwise set this value to 1 to
+# enable the aggressive check option.  Read the docs for more info
+# on what aggressive host checking is, or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host check results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started.  Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks.  If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below).  Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed.  These commands are executed only if the
+# process_performance_data option (above) is set to 1.  The command
+# argument is the short name of a command definition that you 
+# define in your host configuration file.  Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# process_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files.  The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text.  A newline is automatically added after each write
+# to the performance data file.  Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below.  A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files.  The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_services option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_hosts option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios.  This option is useful
+# if you have a distributed or failover monitoring setup.  In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts.  If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# view of this Nagios instance.  Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT.  By default, a passive host check
+# result will put a host into a HARD state type.  This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically 
+# check for orphaned host and service checks.  Since service checks are
+# not rescheduled until the results of their previous execution 
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled.  A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks.  Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results.  If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results.  If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# to detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently.  When Nagios detects that a 
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping.  Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+#         0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does.  This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+#	us		(MM-DD-YYYY HH:MM:SS)
+#	euro    	(DD-MM-YYYY HH:MM:SS)
+#	iso8601		(YYYY-MM-DD HH:MM:SS)
+#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in.  If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path 
+# to include your timezone.  Example:
+#
+#   <Directory "/usr/local/nagios/sbin/">
+#      SetEnv TZ "Australia/Brisbane"
+#      ...
+#   </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located.  If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file = {{nagios_p1_pl}}
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime.  This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more 
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc.  This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+#	$HOSTOUTPUT$
+#	$HOSTPERFDATA$
+#	$HOSTACKAUTHOR$
+#	$HOSTACKCOMMENT$
+#	$SERVICEOUTPUT$
+#	$SERVICEPERFDATA$
+#	$SERVICEACKAUTHOR$
+#	$SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files.  Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression 
+# matching takes place in the object config files.  This option
+# only has an effect if regular expression matching is enabled
+# (see above).  If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?).  If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon.  Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes.  Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+#         0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enabled tweaks
+#         0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.  Enabling this option can cause performance issues in 
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+#         0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks).  If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+#        0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks).  Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems.  Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this.  If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+#        0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file.  OR values together to log multiple
+# types of information.
+# Values: 
+#          -1 = Everything
+#          0 = Nothing
+#	   1 = Functions
+#          2 = Configuration
+#          4 = Process information
+#	   8 = Scheduled events
+#          16 = Host/service checks
+#          32 = Notifications
+#          64 = Event broker
+#          128 = External commands
+#          256 = Commands
+#          512 = Scheduled downtime
+#          1024 = Comments
+#          2048 = Macros
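+#
+# For example (illustrative only), debug_level=12 would log process
+# information (4) plus scheduled events (8).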
+
+debug_level=0
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+#         1 = More detailed
+#         2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file.  If
+# the file grows larger than this size, it will be renamed with a .old
+# extension.  If a file already exists with a .old extension it will
+# automatically be deleted.  This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2
new file mode 100644
index 0000000..d8936a0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.conf.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
+# Last Modified: 11-26-2005
+#
+# This file contains examples of entries that need
+# to be incorporated into your Apache web server
+# configuration file.  Customize the paths, etc. as
+# needed to fit your system.
+#
+
+ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
+
+<Directory "/usr/lib/nagios/cgi">
+#  SSLRequireSSL
+   Options ExecCGI
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+
+Alias /nagios "/usr/share/nagios"
+
+<Directory "/usr/share/nagios">
+#  SSLRequireSSL
+   Options None
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2
new file mode 100644
index 0000000..01e21ac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/nagios.j2
@@ -0,0 +1,146 @@
+#!/bin/sh
+# $Id$
+# Nagios	Startup script for the Nagios monitoring daemon
+#
+# chkconfig:	- 85 15
+# description:	Nagios is a service monitoring system
+# processname: nagios
+# config: /etc/nagios/nagios.cfg
+# pidfile: /var/nagios/nagios.pid
+#
+### BEGIN INIT INFO
+# Provides:		nagios
+# Required-Start:	$local_fs $syslog $network
+# Required-Stop:	$local_fs $syslog $network
+# Short-Description:    start and stop Nagios monitoring server
+# Description:		Nagios is a service monitoring system
+### END INIT INFO
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+prefix="/usr"
+exec_prefix="/usr"
+exec="/usr/sbin/nagios"
+prog="nagios"
+config="/etc/nagios/nagios.cfg"
+pidfile="{{nagios_pid_file}}"
+user="{{nagios_user}}"
+
+[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+
+lockfile=/var/lock/subsys/$prog
+
+start() {
+    [ -x $exec ] || exit 5
+    [ -f $config ] || exit 6
+    echo -n $"Starting $prog: "
+    daemon --user=$user $exec -d $config
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && touch $lockfile
+    return $retval
+}
+
+stop() {
+    echo -n $"Stopping $prog: "
+    killproc -d 10 $exec
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && rm -f $lockfile
+    return $retval
+}
+
+
+restart() {
+    stop
+    start
+}
+
+reload() {
+    echo -n $"Reloading $prog: "
+    killproc $exec -HUP
+    RETVAL=$?
+    echo
+}
+
+force_reload() {
+    restart
+}
+
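+# Validates the Nagios configuration (nagios -v) as the service user before a
+# start, restart, or reload; $nice and $corelimit are presumably supplied by the
+# sysconfig file sourced above, if defined there.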
+check_config() {
+        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
+        RETVAL=$?
+        if [ $RETVAL -ne 0 ] ; then
+                echo -n $"Configuration validation failed"
+                failure
+                echo
+                exit 1
+
+        fi
+}
+
+
+case "$1" in
+    start)
+        status $prog && exit 0
+	check_config
+        $1
+        ;;
+    stop)
+        status $prog|| exit 0
+        $1
+        ;;
+    restart)
+	check_config
+        $1
+        ;;
+    reload)
+        status $prog || exit 7
+	check_config
+        $1
+        ;;
+    force-reload)
+	check_config
+        force_reload
+        ;;
+    status)
+        status $prog
+        ;;
+    condrestart|try-restart)
+        status $prog|| exit 0
+	check_config
+        restart
+        ;;
+    configtest)
+        echo -n  $"Checking config for $prog: "
+        check_config && success
+        echo
+	;;
+    *)
+        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
+        exit 2
+esac
+exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2
new file mode 100644
index 0000000..23c7a56
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/resource.cfg.j2
@@ -0,0 +1,33 @@
+###########################################################################
+#
+# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
+#
+# Last Modified: 09-10-2003
+#
+# You can define $USERx$ macros in this file, which can in turn be used
+# in command definitions in your host config file(s).  $USERx$ macros are
+# useful for storing sensitive information such as usernames, passwords,
+# etc.  They are also handy for specifying the path to plugins and
+# event handlers - if you decide to move the plugins or event handlers to
+# a different directory in the future, you can just update one or two
+# $USERx$ macros, instead of modifying a lot of command definitions.
+#
+# The CGIs will not attempt to read the contents of resource files, so
+# you can set restrictive permissions (600 or 660) on them.
+#
+# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
+#
+# Resource files may also be used to store configuration directives for
+# external data sources like MySQL...
+#
+###########################################################################
+
+# Sets $USER1$ to be the path to the plugins
+$USER1$={{plugins_dir}}
+
+# Sets $USER2$ to be the path to event handlers
+#$USER2$={{eventhandlers_dir}}
+
+# Store some usernames and passwords (hidden from the CGIs)
+#$USER3$=someuser
+#$USER4$=somepassword
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
new file mode 100644
index 0000000..bf4533f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/configuration/oozie-site.xml
@@ -0,0 +1,313 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+        
+       http://www.apache.org/licenses/LICENSE-2.0
+  
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->     
+
+<configuration>
+
+<!--
+    Refer to the oozie-default.xml file for the complete list of
+    Oozie configuration properties and their default values.
+-->
+  <property>
+    <name>oozie.base.url</name>
+    <value>http://localhost:11000/oozie</value>
+    <description>Base Oozie URL.</description>
+   </property>
+
+  <property>
+    <name>oozie.system.id</name>
+    <value>oozie-${user.name}</value>
+    <description>
+    The Oozie system ID.
+    </description>
+   </property>
+
+   <property>
+     <name>oozie.systemmode</name>
+     <value>NORMAL</value>
+     <description>
+     System mode for Oozie at startup.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.AuthorizationService.security.enabled</name>
+     <value>true</value>
+     <description>
+     Specifies whether security (user name/admin role) is enabled or not.
+     If disabled any user can manage Oozie system and manage any job.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.PurgeService.older.than</name>
+     <value>30</value>
+     <description>
+     Jobs older than this value, in days, will be purged by the PurgeService.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.PurgeService.purge.interval</name>
+     <value>3600</value>
+     <description>
+     Interval at which the purge service will run, in seconds.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.queue.size</name>
+     <value>1000</value>
+     <description>Max callable queue size</description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.threads</name>
+     <value>10</value>
+     <description>Number of threads used for executing callables</description>
+   </property>
+
+   <property>
+     <name>oozie.service.CallableQueueService.callable.concurrency</name>
+     <value>3</value>
+     <description>
+     Maximum concurrency for a given callable type.
+     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
+     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
+     All commands that use action executors (action-start, action-end, action-kill and action-check) use
+     the action type as the callable type.
+     </description>
+   </property>
+
+   <property>
+     <name>oozie.service.coord.normal.default.timeout</name>
+     <value>120</value>
+     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
+      -1 means infinite timeout</description>
+   </property>
+
+   <property>
+     <name>oozie.db.schema.name</name>
+     <value>oozie</value>
+     <description>
+      Oozie DataBase Name
+     </description>
+   </property>
+
+    <property>
+      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
+      <value> </value>
+      <description>
+      Whitelisted job tracker for Oozie service.
+      </description>
+    </property>
+   
+    <property>
+      <name>oozie.authentication.type</name>
+      <value>simple</value>
+      <description>
+      </description>
+    </property>
+   
+    <property>
+      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
+      <value> </value>
+      <description>
+      </description>
+    </property>
+
+    <property>
+      <name>oozie.service.WorkflowAppService.system.libpath</name>
+      <value>/user/${user.name}/share/lib</value>
+      <description>
+      System library path to use for workflow applications.
+      This path is added to workflow applications if their job properties set
+      the property 'oozie.use.system.libpath' to true.
+      </description>
+    </property>
+
+    <property>
+      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
+      <value>false</value>
+      <description>
+      If set to true, submissions of MapReduce and Pig jobs will automatically
+      include the system library path, thus not requiring users to
+      specify where the Pig JAR files are. Instead, the ones from the system
+      library path are used.
+      </description>
+    </property>
+    <property>
+      <name>oozie.authentication.kerberos.name.rules</name>
+      <value>
+        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
+        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
+        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
+        DEFAULT
+        </value>
+      <description>The mapping from kerberos principal names to local OS user names.</description>
+    </property>
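+    <!--
+      Illustrative reading of the rule syntax above (hypothetical realm and user
+      names): RULE:[2:$1@$0]([jt]t@.*EXAMPLE.COM)s/.*/mapred/ takes a two-component
+      principal such as jt/host@EXAMPLE.COM, rewrites it as jt@EXAMPLE.COM, matches
+      it against [jt]t@.*EXAMPLE.COM, and maps it to the local user "mapred".
+    -->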
+    <property>
+      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
+      <value>*=/etc/hadoop/conf</value>
+      <description>
+          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
+          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
+          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
+          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
+          the Oozie configuration directory; though the path can be absolute (i.e. it can point
+          to Hadoop client conf/ directories in the local filesystem).
+      </description>
+    </property>
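+    <!--
+      Illustrative example of the AUTHORITY=HADOOP_CONF_DIR format described above
+      (hypothetical hostnames and paths):
+      nn1.example.com:8020=/etc/hadoop/conf-nn1,*=/etc/hadoop/conf
+    -->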
+    <property>
+        <name>oozie.service.ActionService.executor.ext.classes</name>
+        <value>
+            org.apache.oozie.action.email.EmailActionExecutor,
+            org.apache.oozie.action.hadoop.HiveActionExecutor,
+            org.apache.oozie.action.hadoop.ShellActionExecutor,
+            org.apache.oozie.action.hadoop.SqoopActionExecutor,
+            org.apache.oozie.action.hadoop.DistcpActionExecutor
+        </value>
+    </property>
+
+    <property>
+        <name>oozie.service.SchemaService.wf.ext.schemas</name>
+        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
+    </property>
+    <property>
+        <name>oozie.service.JPAService.create.db.schema</name>
+        <value>false</value>
+        <description>
+            Creates Oozie DB.
+
+            If set to true, it creates the DB schema if it does not exist. If the DB schema already exists, this is a NOP.
+            If set to false, it does not create the DB schema. If the DB schema does not exist it fails start up.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.driver</name>
+        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
+        <description>
+            JDBC driver class.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.url</name>
+        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
+        <description>
+            JDBC URL.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.username</name>
+        <value>oozie</value>
+        <description>
+          Database user name to use to connect to the database
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.jdbc.password</name>
+        <value> </value>
+        <description>
+            DB user password.
+
+            IMPORTANT: if the password is empty, leave a 1-space string; the service trims the value,
+                       and if it is empty, Configuration assumes it is NULL.
+        </description>
+    </property>
+
+    <property>
+        <name>oozie.service.JPAService.pool.max.active.conn</name>
+        <value>10</value>
+        <description>
+             Max number of connections.
+        </description>
+    </property>
+
+    <property>
+      <name>oozie.services</name>
+      <value>
+        org.apache.oozie.service.SchedulerService,
+        org.apache.oozie.service.InstrumentationService,
+        org.apache.oozie.service.CallableQueueService,
+        org.apache.oozie.service.UUIDService,
+        org.apache.oozie.service.ELService,
+        org.apache.oozie.service.AuthorizationService,
+        org.apache.oozie.service.UserGroupInformationService,
+        org.apache.oozie.service.HadoopAccessorService,
+        org.apache.oozie.service.URIHandlerService,
+        org.apache.oozie.service.MemoryLocksService,
+        org.apache.oozie.service.DagXLogInfoService,
+        org.apache.oozie.service.SchemaService,
+        org.apache.oozie.service.LiteWorkflowAppService,
+        org.apache.oozie.service.JPAService,
+        org.apache.oozie.service.StoreService,
+        org.apache.oozie.service.CoordinatorStoreService,
+        org.apache.oozie.service.SLAStoreService,
+        org.apache.oozie.service.DBLiteWorkflowStoreService,
+        org.apache.oozie.service.CallbackService,
+        org.apache.oozie.service.ActionService,
+        org.apache.oozie.service.ActionCheckerService,
+        org.apache.oozie.service.RecoveryService,
+        org.apache.oozie.service.PurgeService,
+        org.apache.oozie.service.CoordinatorEngineService,
+        org.apache.oozie.service.BundleEngineService,
+        org.apache.oozie.service.DagEngineService,
+        org.apache.oozie.service.CoordMaterializeTriggerService,
+        org.apache.oozie.service.StatusTransitService,
+        org.apache.oozie.service.PauseTransitService,
+        org.apache.oozie.service.GroupsService,
+        org.apache.oozie.service.ProxyUserService
+      </value>
+      <description>List of Oozie services</description>
+    </property>
+    <property>
+      <name>oozie.service.URIHandlerService.uri.handlers</name>
+      <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
+      <description>
+        Enlist the different uri handlers supported for data availability checks.
+      </description>
+    </property>
+    <property>
+    <name>oozie.services.ext</name>
+    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService</value>
+    <description>
+       To add/replace services defined in 'oozie.services' with custom implementations.
+       Class names must be separated by commas.
+    </description>
+    </property>
+    <property>
+    <name>oozie.service.coord.push.check.requeue.interval</name>
+    <value>30000</value>
+    <description>
+        Command re-queue interval for push dependencies (in milliseconds).
+    </description>
+    </property>
+    <property>
+      <name>oozie.credentials.credentialclasses</name>
+      <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
+      <description>
+        Credential Class to be used for HCat.
+      </description>
+    </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml
new file mode 100644
index 0000000..a208c69
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/metainfo.xml
@@ -0,0 +1,84 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>OOZIE</name>
+      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
+      </comment>
+      <version>4.0.0.2.1.1</version>
+      <components>
+        <component>
+          <name>OOZIE_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/oozie_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>OOZIE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/oozie_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>oozie.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>oozie-client.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>extjs-2.2-1</name>
+            </package>
+            <!--TODO: uncomment this after package will be available in repo-->
+            <!--<package>-->
+              <!--<type>rpm</type>-->
+              <!--<name>falcon-0.4.0.2.0.6.0-76</name>-->
+            <!--</package>-->
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>oozie-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh
new file mode 100644
index 0000000..2cb5a7a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/oozieSmoke2.sh
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  su - ${smoke_test_user} -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the couner and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+      elif [ "SUCCEEDED" == "$act_status" ]; then
+        rc=0;
+        break;
+      else
+        rc=1
+        break;
+      fi
+    done
+    return $rc
+}
+
+export oozie_conf_dir=$1
+export hadoop_conf_dir=$2
+export smoke_test_user=$3
+export security_enabled=$4
+export smoke_user_keytab=$5
+export kinit_path_local=$6
+
+export OOZIE_EXIT_CODE=0
+export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
+export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
+export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
+cd $OOZIE_EXAMPLES_DIR
+
+tar -zxf oozie-examples.tar.gz
+sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else 
+  kinitcmd=""
+fi
+
+su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
+su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
+su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
+echo $cmd
+job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id"
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE

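For readers following the smoke test above: getValueFromField shells out to xmllint and grep to pull a single property value out of a Hadoop-style *-site.xml. Below is a minimal standalone sketch of the same lookup in Python, not part of the patch; the config path and property name are only illustrative.

#!/usr/bin/env python
# Sketch only: return the <value> for a given <name> in a Hadoop-style
# configuration file, mirroring what getValueFromField does with xmllint/grep.
import xml.etree.ElementTree as ET

def get_value_from_field(xml_path, prop_name):
    root = ET.parse(xml_path).getroot()          # root element is <configuration>
    for prop in root.findall('property'):
        if prop.findtext('name') == prop_name:
            return prop.findtext('value')
    return None

if __name__ == "__main__":
    # Illustrative path: resolve the ResourceManager address the same way
    # the smoke test resolves JOBTRACKER from yarn-site.xml.
    print get_value_from_field('/etc/hadoop/conf/yarn-site.xml',
                               'yarn.resourcemanager.address')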
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..97a513c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi  

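wrap_ooziedb.sh above tolerates one specific failure: if ooziedb.sh exits non-zero but its output contains "java.lang.Exception: DB schema exists", the wrapper exits 0 so repeated schema creation stays idempotent. A rough Python sketch of the same tolerance follows, offered only as a reading aid; the wrapper script remains the authoritative implementation.

#!/usr/bin/env python
# Sketch only: run ooziedb.sh and ignore failures whose output shows the
# schema already exists, mirroring the logic of wrap_ooziedb.sh.
import subprocess

def run_ooziedb(args):
    cmd = "cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh %s 2>&1" % args
    proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE)
    out = proc.communicate()[0]
    print out
    if proc.returncode != 0 and "java.lang.Exception: DB schema exists" in out:
        return 0   # schema was created on an earlier run: treat as success
    return proc.returncode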
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py
new file mode 100644
index 0000000..1422d1e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def oozie(is_server=False): # TODO: see if we can remove this parameter
+  import params
+  #TODO hack for falcon el
+  oozie_site = dict(params.config['configurations']['oozie-site'])
+  oozie_site["oozie.services.ext"] = 'org.apache.oozie.service.JMSAccessorService,' + oozie_site["oozie.services.ext"]
+  XmlConfig( "oozie-site.xml",
+    conf_dir = params.conf_dir, 
+    configurations = oozie_site,
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0664
+  )
+  Directory( params.conf_dir,
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
+    owner = params.oozie_user
+  )
+  
+  TemplateConfig( format("{conf_dir}/oozie-log4j.properties"),
+    owner = params.oozie_user
+  )
+
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
+    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
+     -o {check_db_connection_jar_name}'"),
+      not_if  = format("[ -f {check_db_connection_jar} ]")
+    )
+    
+  oozie_ownership( )
+  
+  if is_server:      
+    oozie_server_specific( )
+  
+def oozie_ownership():
+  import params
+  
+  File ( format("{conf_dir}/adminusers.txt"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+def oozie_server_specific():
+  import params
+  
+  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    mode = 0755,
+    recursive = True
+  )
+       
+  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
+  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
+  
+  # this is different for HDP1
+  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir} && mkdir -p {oozie_libext_dir} && cp {ext_js_path} {oozie_libext_dir}")
+  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
+    cmd3 += format(" && cp {jdbc_driver_jar} {oozie_libext_dir}")
+  #falcon el extension
+  if params.has_falcon_host:
+    Execute(format('cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-0.4.0.2.0.6.0-76.jar {oozie_libext_dir}'))
+  # this is different for HDP1
+  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh prepare-war")
+  
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  Execute( [cmd1, cmd2, cmd3],
+    not_if  = no_op_test
+  )
+  Execute( cmd4,
+    user = params.oozie_user,
+    not_if  = no_op_test
+  )
+  

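The Execute resources in oozie.py rely on resource_management's not_if guard for idempotence: the wrapped command runs only when the guard shell test fails. The sketch below approximates that behaviour in plain Python, assuming the guard is an ordinary shell test; the sharelib command and pid file path are illustrative, loosely based on the script rather than taken verbatim.

#!/usr/bin/env python
# Sketch only: approximate semantics of Execute(cmd, not_if=guard) --
# run cmd only when the guard command exits non-zero.
import subprocess

def execute_unless(cmd, not_if=None):
    if not_if and subprocess.call(not_if, shell=True) == 0:
        return   # guard passed, work already done: skip the command
    subprocess.check_call(cmd, shell=True)

# e.g. unpack the Oozie sharelib only while the server is not already running
execute_unless("cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz",
               not_if="ls /var/run/oozie/oozie.pid >/dev/null 2>&1 && "
                      "ps `cat /var/run/oozie/oozie.pid` >/dev/null 2>&1")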
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py
new file mode 100644
index 0000000..1d5db39
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_client.py
@@ -0,0 +1,33 @@
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py
new file mode 100644
index 0000000..6c00738
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_server.py
@@ -0,0 +1,47 @@
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=True)
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    #TODO remove this when config command will be implemented
+    self.configure(env)
+    oozie_service(action='start')
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py
new file mode 100644
index 0000000..e9edcc9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/oozie_service.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def oozie_service(action = 'start'): # 'start' or 'stop'
+  import params
+
+  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
+    
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+      
+    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
+    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+      
+    if db_connection_check_command:
+      Execute( db_connection_check_command)
+                  
+    Execute( cmd1,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+      ignore_failures = True
+    ) 
+    
+    Execute( cmd2,
+      user = params.oozie_user,       
+      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
+    )
+    
+    Execute( start_cmd,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+    )
+  elif action == 'stop':
+    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
+    Execute( stop_cmd,
+      only_if  = no_op_test
+    )
+
+  
+  

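oozie.py and oozie_service.py gate the start-up commands on the same no_op_test: the pid file must exist and the recorded pid must belong to a live process. A small Python sketch of that liveness check is shown below for reference; the default pid file path here is an assumption, since the real value comes from params/status_params.

#!/usr/bin/env python
# Sketch only: mirrors the shell guard
#   ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1
import os

def oozie_server_is_running(pid_file="/var/run/oozie/oozie.pid"):
    try:
        with open(pid_file) as f:
            pid = int(f.read().strip())
    except (IOError, ValueError):
        return False           # no pid file, or unreadable contents
    try:
        os.kill(pid, 0)        # signal 0 checks existence without killing
        return True
    except OSError:
        return False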

[25/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
deleted file mode 100644
index 39fe6e5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/files/hbaseSmokeVerify.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-conf_dir=$1
-data=$2
-echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
-cat /tmp/hbase_chk_verify
-echo "Looking for $data"
-grep -q $data /tmp/hbase_chk_verify
-if [ "$?" -ne 0 ]
-then
-  exit 1
-fi
-
-grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/functions.py
deleted file mode 100644
index 80b49e6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/functions.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-import re
-import math
-import datetime
-
-from resource_management.core.shell import checked_call
-
-def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
-  """
-  @param heapsize_str: str (e.g '1000m')
-  @param xmn_percent: float (e.g 0.2)
-  @param xmn_max: integer (e.g 512)
-  """
-  heapsize = int(re.search('\d+',heapsize_str).group(0))
-  heapsize_unit = re.search('\D+',heapsize_str).group(0)
-  xmn_val = int(math.floor(heapsize*xmn_percent))
-  xmn_val -= xmn_val % 8
-  
-  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
-  return str(result_xmn_val) + heapsize_unit
-
-def get_unique_id_and_date():
-    code, out = checked_call("hostid")
-    id = out.strip()
-    
-    now = datetime.datetime.now()
-    date = now.strftime("%M%d%y")
-
-    return "id{id}_date{date}".format(id=id, date=date)
-  
-def get_kinit_path(pathes_list):
-  """
-  @param pathes: comma separated list
-  """
-  kinit_path = ""
-  
-  for x in pathes_list:
-    if not x:
-      continue
-    
-    path = os.path.join(x,"kinit")
-
-    if os.path.isfile(path):
-      kinit_path = path
-      break
-    
-  return kinit_path

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase.py
deleted file mode 100644
index bd33463..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-def hbase(type=None # 'master' or 'regionserver' or 'client'
-              ):
-  import params
-  
-  Directory( params.conf_dir,
-      owner = params.hbase_user,
-      group = params.user_group,
-      recursive = True
-  )
-  
-  XmlConfig( "hbase-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hbase-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-
-  XmlConfig( "hdfs-site.xml",
-            conf_dir = params.conf_dir,
-            configurations = params.config['configurations']['hdfs-site'],
-            owner = params.hbase_user,
-            group = params.user_group
-  )
-  
-  if 'hbase-policy' in params.config['configurations']:
-    XmlConfig( "hbase-policy.xml",
-      configurations = params.config['configurations']['hbase-policy'],
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  # Manually overriding ownership of file installed by hadoop package
-  else: 
-    File( format("{conf_dir}/hbase-policy.xml"),
-      owner = params.hbase_user,
-      group = params.user_group
-    )
-  
-  hbase_TemplateConfig( 'hbase-env.sh')     
-       
-  hbase_TemplateConfig( params.metric_prop_file_name,
-    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
-  )
-
-  hbase_TemplateConfig( 'regionservers')
-
-  if params.security_enabled:
-    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
-  
-  if type != "client":
-    Directory( params.pid_dir,
-      owner = params.hbase_user,
-      recursive = True
-    )
-  
-    Directory ( [params.tmp_dir, params.log_dir],
-      owner = params.hbase_user,
-      recursive = True
-    )    
-
-def hbase_TemplateConfig(name, 
-                         tag=None
-                         ):
-  import params
-
-  TemplateConfig( format("{conf_dir}/{name}"),
-      owner = params.hbase_user,
-      template_tag = tag
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_client.py
deleted file mode 100644
index 0f2a1bc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-
-         
-class HbaseClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    
-    hbase(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/'
-  stdoutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
-  
-  HbaseClient().execute()
-  
-if __name__ == "__main__":
-  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_master.py
deleted file mode 100644
index d94b4b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_master.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-         
-class HbaseMaster(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='master')
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'master',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'master',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
-    check_process_status(pid_file)
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
-  stroutputf = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
-  
-  HbaseMaster().execute()
-  
-if __name__ == "__main__":
-  HbaseMaster().execute()
-  #main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_regionserver.py
deleted file mode 100644
index 2d91e75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_regionserver.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hbase import hbase
-from hbase_service import hbase_service
-
-         
-class HbaseRegionServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hbase(type='regionserver')
-      
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # for security
-
-    hbase_service( 'regionserver',
-      action = 'start'
-    )
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hbase_service( 'regionserver',
-      action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
-    check_process_status(pid_file)
-    
-  def decommission(self, env):
-    print "Decommission not yet implemented!"
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseRegionServer().execute()
-  
-if __name__ == "__main__":
-  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_service.py
deleted file mode 100644
index 7a1248b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/hbase_service.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hbase_service(
-  name,
-  action = 'start'): # 'start' or 'stop' or 'status'
-    
-    import params
-  
-    role = name
-    cmd = format("{daemon_script} --config {conf_dir}")
-    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
-    
-    daemon_cmd = None
-    no_op_test = None
-    
-    if action == 'start':
-      daemon_cmd = format("{cmd} start {role}")
-      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    elif action == 'stop':
-      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
-
-    if daemon_cmd is not None:
-      Execute ( daemon_cmd,
-        not_if = no_op_test,
-        user = params.hbase_user
-      )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/params.py
deleted file mode 100644
index 674b2d9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/params.py
+++ /dev/null
@@ -1,84 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/hbase/conf"
-daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
-
-hbase_user = config['configurations']['global']['hbase_user']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-user_group = config['configurations']['global']['user_group']
-
-# this is "hadoop-metrics.properties" for 1.x stacks
-metric_prop_file_name = "hadoop-metrics2-hbase.properties"
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-
-log_dir = config['configurations']['global']['hbase_log_dir']
-master_heapsize = config['configurations']['global']['hbase_master_heapsize']
-
-regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
-regionserver_xmn_size = functions.calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
-
-pid_dir = status_params.pid_dir
-tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-
-client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
-master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
-regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
-
-ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
-ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
-
-rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-
-smoke_test_user = config['configurations']['global']['smokeuser']
-smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
-service_check_data = get_unique_id_and_date()
-
-if security_enabled:
-  
-  _use_hostname_in_principal = default('instance_name', True)
-  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
-  _hostname = config['hostname']
-  _kerberos_domain = config['configurations']['global']['kerberos_domain']
-  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
-  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
-  
-  if _use_hostname_in_principal:
-    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
-  else:
-    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
-    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
-    
-master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
-regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/service_check.py
deleted file mode 100644
index ff6d0ed..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/service_check.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import functions
-
-
-class HbaseServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    output_file = "/apps/hbase/data/ambarismoketest"
-    test_cmd = format("fs -test -e {output_file}")
-    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
-    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
-  
-    File( '/tmp/hbaseSmokeVerify.sh',
-      content = StaticFile("hbaseSmokeVerify.sh"),
-      mode = 0755
-    )
-  
-    File( hbase_servicecheck_file,
-      mode = 0755,
-      content = Template('hbase-smoke.sh.j2')
-    )
-    
-    if params.security_enabled:    
-      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
-      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
-      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
-  
-      File( hbase_grant_premissions_file,
-        owner   = params.hbase_user,
-        group   = params.user_group,
-        mode    = 0644,
-        content = Template('hbase_grant_permissions.j2')
-      )
-      
-      Execute( grantprivelegecmd,
-        user = params.hbase_user,
-      )
-
-    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
-    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
-  
-    Execute( servicecheckcmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-  
-    Execute ( smokeverifycmd,
-      tries     = 3,
-      try_sleep = 5,
-      user = params.smoke_test_user,
-      logoutput = True
-    )
-    
-def main():
-  import sys
-  command_type = 'perform'
-  command_data_file = '/root/workspace/HBase/input.json'
-  basedir = '/root/workspace/HBase/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  HbaseServiceCheck().execute()
-  
-if __name__ == "__main__":
-  HbaseServiceCheck().execute()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/status_params.py
deleted file mode 100644
index c9b20ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
deleted file mode 100644
index 2583f44..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8663
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8663
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
deleted file mode 100644
index 9f2b616..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# See http://wiki.apache.org/hadoop/GangliaMetrics
-#
-# Make sure you know whether you are using ganglia 3.0 or 3.1.
-# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
-# And, yes, this file is named hadoop-metrics.properties rather than
-# hbase-metrics.properties because we're leveraging the hadoop metrics
-# package and hadoop-metrics.properties is an hardcoded-name, at least
-# for the moment.
-#
-# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
-
-# HBase-specific configuration to reset long-running stats (e.g. compactions)
-# If this variable is left out, then the default is no expiration.
-hbase.extendedperiod = 3600
-
-# Configuration of the "hbase" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-hbase.period=10
-hbase.servers={{ganglia_server_host}}:8660
-
-# Configuration of the "jvm" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-jvm.period=10
-jvm.servers={{ganglia_server_host}}:8660
-
-# Configuration of the "rpc" context for ganglia
-# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
-# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
-rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
-rpc.period=10
-rpc.servers={{ganglia_server_host}}:8660
-
-#Ganglia following hadoop example
-hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-hbase.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-hbase.sink.ganglia.servers={{ganglia_server_host}}:8660

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-env.sh.j2
deleted file mode 100644
index b8505b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-env.sh.j2
+++ /dev/null
@@ -1,82 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-# Set environment variables here.
-
-# The java implementation to use. Java 1.6 required.
-export JAVA_HOME={{java64_home}}
-
-# HBase Configuration directory
-export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
-
-# Extra Java CLASSPATH elements. Optional.
-export HBASE_CLASSPATH=${HBASE_CLASSPATH}
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-# export HBASE_HEAPSIZE=1000
-
-# Extra Java runtime options.
-# Below are what we set by default. May only work with SUN JVM.
-# For more on why as well as other possible settings,
-# see http://wiki.apache.org/hadoop/PerformanceTuning
-export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
-export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
-# Uncomment below to enable java garbage collection logging.
-# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
-
-# Uncomment and adjust to enable JMX exporting
-# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
-# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-#
-# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
-export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
-export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
-# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
-# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
-
-# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
-export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
-
-# Extra ssh options. Empty by default.
-# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
-
-# Where log files are stored. $HBASE_HOME/logs by default.
-export HBASE_LOG_DIR={{log_dir}}
-
-# A string representing this instance of hbase. $USER by default.
-# export HBASE_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes. See 'man nice'.
-# export HBASE_NICENESS=10
-
-# The directory where pid files are stored. /tmp by default.
-export HBASE_PID_DIR={{pid_dir}}
-
-# Seconds to sleep between slave commands. Unset by default. This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HBASE_SLAVE_SLEEP=0.1
-
-# Tell HBase whether it should manage it's own instance of Zookeeper or not.
-export HBASE_MANAGES_ZK=false
-
-{% if security_enabled %}
-export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
-export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
-export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-smoke.sh.j2
deleted file mode 100644
index 61fe62f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase-smoke.sh.j2
+++ /dev/null
@@ -1,26 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-disable 'ambarismoketest'
-drop 'ambarismoketest'
-create 'ambarismoketest','family'
-put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
-scan 'ambarismoketest'
-exit
\ No newline at end of file

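Once rendered, the smoke-test commands above are fed to the HBase shell during the service check. Roughly, and only as an illustration (the path and invocation below are not the exact service-check code):

import subprocess

# Run the rendered smoke-test script through the HBase shell non-interactively.
subprocess.check_call(["hbase", "shell", "/tmp/hbase-smoke.sh"])
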
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_client_jaas.conf.j2
deleted file mode 100644
index 696718e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_client_jaas.conf.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_grant_permissions.j2
deleted file mode 100644
index 9102d35..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_grant_permissions.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
-exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_master_jaas.conf.j2
deleted file mode 100644
index 722cfcc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_master_jaas.conf.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{master_keytab_path}}"
-principal="{{master_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
deleted file mode 100644
index cb9b7b0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{regionserver_keytab_path}}"
-principal="{{regionserver_jaas_princ}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/regionservers.j2
deleted file mode 100644
index b22ae5f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/package/templates/regionservers.j2
+++ /dev/null
@@ -1,2 +0,0 @@
-{% for host in rs_hosts %}{{host}}
-{% endfor %}
\ No newline at end of file

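The regionservers.j2 loop above simply writes one RegionServer host per line; an illustrative render with invented host names:

from jinja2 import Template

tmpl = Template("{% for host in rs_hosts %}{{host}}\n{% endfor %}")
print(tmpl.render(rs_hosts=["rs1.example.com", "rs2.example.com"]), end="")
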
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/core-site.xml
deleted file mode 100644
index e244fc7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/core-site.xml
+++ /dev/null
@@ -1,167 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
- <!--
-    Licensed to the Apache Software Foundation (ASF) under one or more
-    contributor license agreements.  See the NOTICE file distributed with
-    this work for additional information regarding copyright ownership.
-    The ASF licenses this file to You under the Apache License, Version 2.0
-    (the "License"); you may not use this file except in compliance with
-    the License.  You may obtain a copy of the License at
- 
-        http://www.apache.org/licenses/LICENSE-2.0
- 
-    Unless required by applicable law or agreed to in writing, software
-    distributed under the License is distributed on an "AS IS" BASIS,
-    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-    See the License for the specific language governing permissions and
-    limitations under the License.
- -->
- 
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>io.file.buffer.size</name>
-    <value>131072</value>
-    <description>The size of buffer for use in sequence files.
-  The size of this buffer should probably be a multiple of hardware
-  page size (4096 on Intel x86), and it determines how much data is
-  buffered during read and write operations.</description>
-  </property>
-
-  <property>
-    <name>io.serializations</name>
-    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
-  </property>
-
-  <property>
-    <name>io.compression.codecs</name>
-    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
-    <description>A list of the compression codec classes that can be used
-                 for compression/decompression.</description>
-  </property>
-
-<!-- file system properties -->
-
-  <property>
-    <name>fs.defaultFS</name>
-    <!-- cluster variant -->
-    <value>hdfs://localhost:8020</value>
-    <description>The name of the default file system.  Either the
-  literal string "local" or a host:port for NDFS.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>fs.trash.interval</name>
-    <value>360</value>
-    <description>Number of minutes between trash checkpoints.
-  If zero, the trash feature is disabled.
-  </description>
-  </property>
-
-  <!-- ipc properties: copied from kryptonite configuration -->
-  <property>
-    <name>ipc.client.idlethreshold</name>
-    <value>8000</value>
-    <description>Defines the threshold number of connections after which
-               connections will be inspected for idleness.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connection.maxidletime</name>
-    <value>30000</value>
-    <description>The maximum time after which a client will bring down the
-               connection to the server.
-  </description>
-  </property>
-
-  <property>
-    <name>ipc.client.connect.max.retries</name>
-    <value>50</value>
-    <description>Defines the maximum number of retries for IPC connections.</description>
-  </property>
-
-  <!-- Web Interface Configuration -->
-  <property>
-    <name>mapreduce.jobtracker.webinterface.trusted</name>
-    <value>false</value>
-    <description> If set to true, the web interfaces of JT and NN may contain
-                actions, such as kill job, delete file, etc., that should
-                not be exposed to public. Enable this option if the interfaces
-                are only reachable by those who have the right authorization.
-  </description>
-  </property>
-
- <property>
-   <name>hadoop.security.authentication</name>
-   <value>simple</value>
-   <description>
-   Set the authentication for the cluster. Valid values are: simple or
-   kerberos.
-   </description>
- </property>
-<property>
-  <name>hadoop.security.authorization</name>
-  <value>false</value>
-  <description>
-     Enable authorization for different protocols.
-  </description>
-</property>
-
-  <property>
-    <name>hadoop.security.auth_to_local</name>
-    <value>
-        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
-        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
-        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
-        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
-        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
-        DEFAULT
-    </value>
-<description>The mapping from kerberos principal names to local OS mapreduce.job.user.names.
-  So the default rule is just "DEFAULT" which takes all principals in your default domain to their first component.
-  "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" to "omalley", if your default domain is APACHE.ORG.
-The translations rules have 3 sections:
-      base     filter    substitution
-The base consists of a number that represents the number of components in the principal name excluding the realm and the pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
-
-[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
-[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
-[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
-
-The filter is a regex in parens that must match the generated string for the rule to apply.
-
-"(.*%admin)" will take any string that ends in "%admin"
-"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
-
-Finally, the substitution is a sed rule to translate a regex into a fixed string.
-
-"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
-"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
-"s/X/Y/g" replaces all of the "X" in the name with "Y"
-
-So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-To also translate the names with a second component, you'd make the rules:
-
-RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
-RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
-DEFAULT
-
-If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
-
-RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
-DEFAULT
-    </description>
-  </property>
-
-</configuration>

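The hadoop.security.auth_to_local description above can be made concrete with a small sketch. This is not Hadoop's implementation, only an illustration of how one rule from the default value, RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/, maps a two-component principal; the principals below are invented.

import re

def map_principal(principal):
    name, realm = principal.split("@", 1)
    parts = name.split("/")
    if len(parts) == 2:                                   # base "2": two-component principals
        candidate = "%s@%s" % (parts[0], realm)           # build "$1@$0"
        if re.match(r"[rn]m@.*", candidate):              # filter must match
            return re.sub(r".*", "yarn", candidate, count=1)  # substitution s/.*/yarn/
    return parts[0]                                       # DEFAULT: first component, realm dropped

print(map_principal("rm/host1.example.com@EXAMPLE.COM"))  # yarn
print(map_principal("omalley@EXAMPLE.COM"))               # omalley
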
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/global.xml
deleted file mode 100644
index 49d66bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/global.xml
+++ /dev/null
@@ -1,192 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>namenode_host</name>
-    <value></value>
-    <description>NameNode Host.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_name_dir</name>
-    <value>/hadoop/hdfs/namenode</value>
-    <description>NameNode Directories.</description>
-  </property>
-  <property>
-    <name>snamenode_host</name>
-    <value></value>
-    <description>Secondary NameNode.</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Secondary NameNode checkpoint dir.</description>
-  </property>
-  <property>
-    <name>datanode_hosts</name>
-    <value></value>
-    <description>List of Datanode Hosts.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Data directories for Data Nodes.</description>
-  </property>
-  <property>
-    <name>hdfs_log_dir_prefix</name>
-    <value>/var/log/hadoop</value>
-    <description>Hadoop Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>hadoop_pid_dir_prefix</name>
-    <value>/var/run/hadoop</value>
-    <description>Hadoop PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>dfs_webhdfs_enabled</name>
-    <value>true</value>
-    <description>WebHDFS enabled</description>
-  </property>
-  <property>
-    <name>hadoop_heapsize</name>
-    <value>1024</value>
-    <description>Hadoop maximum Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_heapsize</name>
-    <value>1024</value>
-    <description>NameNode Java heap size</description>
-  </property>
-  <property>
-    <name>namenode_opt_newsize</name>
-    <value>200</value>
-    <description>NameNode new generation size</description>
-  </property>
-  <property>
-    <name>namenode_opt_maxnewsize</name>
-    <value>200</value>
-    <description>NameNode maximum new generation size</description>
-  </property>
-  <property>
-    <name>datanode_du_reserved</name>
-    <value>1073741824</value>
-    <description>Reserved space for HDFS</description>
-  </property>
-  <property>
-    <name>dtnode_heapsize</name>
-    <value>1024</value>
-    <description>DataNode maximum Java heap size</description>
-  </property>
-  <property>
-    <name>dfs_datanode_failed_volume_tolerated</name>
-    <value>0</value>
-    <description>DataNode volumes failure toleration</description>
-  </property>
-  <property>
-    <name>dfs_namenode_checkpoint_period</name>
-    <value>21600</value>
-    <description>HDFS Maximum Checkpoint Delay</description>
-  </property>
-  <property>
-    <name>fs_checkpoint_size</name>
-    <value>0.5</value>
-    <description>FS Checkpoint Size.</description>
-  </property>
-  <property>
-    <name>proxyuser_group</name>
-    <value>users</value>
-    <description>Proxy user group.</description>
-  </property>
-  <property>
-    <name>dfs_exclude</name>
-    <value></value>
-    <description>HDFS Exclude hosts.</description>
-  </property>
-  <property>
-    <name>dfs_replication</name>
-    <value>3</value>
-    <description>Default Block Replication.</description>
-  </property>
-  <property>
-    <name>dfs_block_local_path_access_user</name>
-    <value>hbase</value>
-    <description>User allowed block local path access.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_address</name>
-    <value>50010</value>
-    <description>Port for datanode address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_http_address</name>
-    <value>50075</value>
-    <description>Port for datanode HTTP address.</description>
-  </property>
-  <property>
-    <name>dfs_datanode_data_dir_perm</name>
-    <value>750</value>
-    <description>Datanode dir perms.</description>
-  </property>
-
-  <property>
-    <name>security_enabled</name>
-    <value>false</value>
-    <description>Hadoop Security</description>
-  </property>
-  <property>
-    <name>kerberos_domain</name>
-    <value>EXAMPLE.COM</value>
-    <description>Kerberos realm.</description>
-  </property>
-  <property>
-    <name>kadmin_pw</name>
-    <value></value>
-    <description>Kerberos realm admin password</description>
-  </property>
-  <property>
-    <name>keytab_path</name>
-    <value>/etc/security/keytabs</value>
-    <description>Kerberos keytab path.</description>
-  </property>
-  
-    <property>
-    <name>namenode_formatted_mark_dir</name>
-    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
-    <description>Formatted Mark Directory.</description>
-  </property>
-    <property>
-    <name>hdfs_user</name>
-    <value>hdfs</value>
-    <description>User and Groups.</description>
-  </property>
-  <property>
-    <name>lzo_enabled</name>
-    <value>true</value>
-    <description>LZO compression enabled</description>
-  </property>
-  
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hadoop-policy.xml
deleted file mode 100644
index 51b01bb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hadoop-policy.xml
+++ /dev/null
@@ -1,134 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientProtocol, which is used by user code
-    via the DistributedFileSystem.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.client.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
-    for block recovery.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for DatanodeProtocol, which is used by datanodes to
-    communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.datanode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
-    for updating generation timestamp.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.namenode.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for NamenodeProtocol, the protocol used by the secondary
-    namenode to communicate with the namenode.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.inter.tracker.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
-    communicate with the jobtracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for JobSubmissionProtocol, used by job clients to
-    communicate with the jobtracker for job submission, querying job status etc.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.job.task.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
-    tasks to communicate with the parent tasktracker.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
- <property>
-    <name>security.admin.operations.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for AdminOperationsProtocol. Used for admin commands.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
-    users mappings. The ACL is a comma-separated list of user and
-    group names. The user and group list is separated by a blank. For
-    e.g. "alice,bob users,wheel".  A special value of "*" means all
-    users are allowed.</description>
-  </property>
-
-<property>
-    <name>security.refresh.policy.protocol.acl</name>
-    <value>hadoop</value>
-    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
-    dfsadmin and mradmin commands to refresh the security policy in-effect.
-    The ACL is a comma-separated list of user and group names. The user and
-    group list is separated by a blank. For e.g. "alice,bob users,wheel".
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-
-</configuration>

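The ACL format repeated in the descriptions above ("user1,user2 group1,group2", with "*" meaning everyone and a leading blank meaning groups only) is easy to misread; a small parsing sketch, not Hadoop's own code:

def parse_acl(acl):
    # "*" means all users are allowed; otherwise "user1,user2 group1,group2".
    if acl.strip() == "*":
        return None
    users, _, groups = acl.partition(" ")
    return ([u for u in users.split(",") if u],
            [g for g in groups.split(",") if g])

print(parse_acl("alice,bob users,wheel"))  # (['alice', 'bob'], ['users', 'wheel'])
print(parse_acl(" hdfs"))                  # ([], ['hdfs'])
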
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hdfs-site.xml
deleted file mode 100644
index 7e8bfba..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/configuration/hdfs-site.xml
+++ /dev/null
@@ -1,513 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-  <!-- file system properties -->
-
-  <property>
-    <name>dfs.namenode.name.dir</name>
-    <!-- cluster variant -->
-    <value>/hadoop/hdfs/namenode</value>
-    <description>Determines where on the local filesystem the DFS name node
-      should store the name table.  If this is a comma-delimited list
-      of directories then the name table is replicated in all of the
-      directories, for redundancy. </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.support.append</name>
-    <value>true</value>
-    <description>to enable dfs append</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.webhdfs.enabled</name>
-    <value>true</value>
-    <description>Whether to enable WebHDFS feature</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.failed.volumes.tolerated</name>
-    <value>0</value>
-    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir</name>
-    <value>/hadoop/hdfs/data</value>
-    <description>Determines where on the local filesystem a DFS data node
-      should store its blocks.  If this is a comma-delimited
-      list of directories, then data will be stored in all named
-      directories, typically on different devices.
-      Directories that do not exist are ignored.
-    </description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.hosts.exclude</name>
-    <value>/etc/hadoop/conf/dfs.exclude</value>
-    <description>Names a file that contains a list of hosts that are
-      not permitted to connect to the namenode.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.</description>
-  </property>
-
-  <!--
-    <property>
-      <name>dfs.hosts</name>
-      <value>/etc/hadoop/conf/dfs.include</value>
-      <description>Names a file that contains a list of hosts that are
-      permitted to connect to the namenode. The full pathname of the file
-      must be specified.  If the value is empty, all hosts are
-      permitted.</description>
-    </property>
-  -->
-
-  <property>
-    <name>dfs.namenode.checkpoint.dir</name>
-    <value>/hadoop/hdfs/namesecondary</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary images to merge.
-      If this is a comma-delimited list of directories then the image is
-      replicated in all of the directories for redundancy.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.checkpoint.edits.dir</name>
-    <value>${dfs.namenode.checkpoint.dir}</value>
-    <description>Determines where on the local filesystem the DFS secondary
-      name node should store the temporary edits to merge.
-      If this is a comma-delimited list of directories then the edits are
-      replicated in all of the directories for redundancy.
-      Default value is same as dfs.namenode.checkpoint.dir
-    </description>
-  </property>
-
-
-  <property>
-    <name>dfs.namenode.checkpoint.period</name>
-    <value>21600</value>
-    <description>The number of seconds between two periodic checkpoints.
-    </description>
-  </property>
-
-  <property>
-    <name>fs.checkpoint.size</name>
-    <value>67108864</value>
-    <description>The size of the current edit log (in bytes) that triggers
-      a periodic checkpoint even if the maximum checkpoint delay is not reached
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication.max</name>
-    <value>50</value>
-    <description>Maximal block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.replication</name>
-    <value>3</value>
-    <description>Default block replication.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.heartbeat.interval</name>
-    <value>3</value>
-    <description>Determines datanode heartbeat interval in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.safemode.threshold-pct</name>
-    <value>1.0f</value>
-    <description>
-      Specifies the percentage of blocks that should satisfy
-      the minimal replication requirement defined by dfs.namenode.replication.min.
-      Values less than or equal to 0 mean not to start in safe mode.
-      Values greater than 1 will make safe mode permanent.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.balance.bandwidthPerSec</name>
-    <value>6250000</value>
-    <description>
-      Specifies the maximum amount of bandwidth that each datanode
-      can utilize for the balancing purpose in term of
-      the number of bytes per second.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.https.port</name>
-    <value>50470</value>
-    <description>
-      This property is used by HftpFileSystem.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.port</name>
-    <value>50010</value>
-    <description>
-      The datanode port for data transfer. This property is effective only if referenced from dfs.datanode.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
-    <description>
-      The datanode server address and port for data transfer.
-    </description>
-  </property>
-
-  <property>
-    <name>ambari.dfs.datanode.http.port</name>
-    <value>50075</value>
-    <description>
-      The datanode http port. This property is effective only if referenced from dfs.datanode.http.address property.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.http.address</name>
-    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
-    <description>
-      The datanode http server address and port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blocksize</name>
-    <value>134217728</value>
-    <description>The default block size for new files.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.http-address</name>
-    <value>localhost:50070</value>
-    <description>The address and the base port where the dfs namenode
-      web ui will listen on.</description>
-    <final>true</final>
-  </property>
-
-  <property>
-    <name>dfs.datanode.du.reserved</name>
-    <!-- cluster variant -->
-    <value>1073741824</value>
-    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.ipc.address</name>
-    <value>0.0.0.0:8010</value>
-    <description>
-      The datanode ipc server address and port.
-      If the port is 0 then the server will start on a free port.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.blockreport.initialDelay</name>
-    <value>120</value>
-    <description>Delay for first block report in seconds.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>40</value>
-    <description>The number of server threads for the namenode.</description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.max.transfer.threads</name>
-    <value>1024</value>
-    <description>PRIVATE CONFIG VARIABLE</description>
-  </property>
-
-  <!-- Permissions configuration -->
-
-  <property>
-    <name>fs.permissions.umask-mode</name>
-    <value>022</value>
-    <description>
-      The octal umask used when creating files and directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.enabled</name>
-    <value>true</value>
-    <description>
-      If "true", enable permission checking in HDFS.
-      If "false", permission checking is turned off,
-      but all other behavior is unchanged.
-      Switching from one parameter value to the other does not change the mode,
-      owner or group of files or directories.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.permissions.superusergroup</name>
-    <value>hdfs</value>
-    <description>The name of the group of super-users.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.handler.count</name>
-    <value>100</value>
-    <description>Added to grow Queue size so that more client connections are allowed</description>
-  </property>
-
-  <property>
-    <name>dfs.block.access.token.enable</name>
-    <value>true</value>
-    <description>
-      If "true", access tokens are used as capabilities for accessing datanodes.
-      If "false", no access tokens are checked on accessing datanodes.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the NameNode
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.principal</name>
-    <value></value>
-    <description>
-      Kerberos principal name for the secondary NameNode.
-    </description>
-  </property>
-
-
-  <!--
-    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
-  -->
-  <property>
-    <name>dfs.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.kerberos.https.principal</name>
-    <value></value>
-    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
-
-  </property>
-
-  <property>
-    <!-- cluster variant -->
-    <name>dfs.namenode.secondary.http-address</name>
-    <value>localhost:50090</value>
-    <description>Address of secondary namenode web server</description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.principal</name>
-    <value></value>
-    <description>
-      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
-      HTTP SPNEGO specification.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.web.authentication.kerberos.keytab</name>
-    <value></value>
-    <description>
-      The Kerberos keytab file with the credentials for the
-      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.kerberos.principal</name>
-    <value></value>
-    <description>
-      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.secondary.namenode.keytab.file</name>
-    <value></value>
-    <description>
-      Combined keytab file containing the namenode service and host principals.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.datanode.keytab.file</name>
-    <value></value>
-    <description>
-      The filename of the keytab file for the DataNode.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.https-address</name>
-    <value>localhost:50470</value>
-    <description>The https address where namenode binds</description>
-
-  </property>
-
-  <property>
-    <name>dfs.datanode.data.dir.perm</name>
-    <value>750</value>
-    <description>The permissions that should be there on dfs.datanode.data.dir
-      directories. The datanode will not come up if the permissions are
-      different on existing dfs.datanode.data.dir directories. If the directories
-      don't exist, they will be created with this permission.</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.accesstime.precision</name>
-    <value>0</value>
-    <description>The access time for HDFS files is precise up to this value.
-      The default value is 1 hour. Setting a value of 0 disables
-      access times for HDFS.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.cluster.administrators</name>
-    <value> hdfs</value>
-    <description>ACL for who all can view the default servlets in the HDFS</description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.avoid.read.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid reading from stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.avoid.write.stale.datanode</name>
-    <value>true</value>
-    <description>
-      Indicate whether or not to avoid writing to stale datanodes whose
-      heartbeat messages have not been received by the namenode for more than a
-      specified time interval.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.write.stale.datanode.ratio</name>
-    <value>1.0f</value>
-    <description>When the ratio of stale datanodes to total datanodes marked is greater
-      than this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
-    </description>
-  </property>
-  <property>
-    <name>dfs.namenode.stale.datanode.interval</name>
-    <value>30000</value>
-    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.http-address</name>
-    <value>0.0.0.0:8480</value>
-    <description>The address and port the JournalNode web UI listens on.
-      If the port is 0 then the server will start on a free port. </description>
-  </property>
-
-  <property>
-    <name>dfs.journalnode.edits.dir</name>
-    <value>/grid/0/hdfs/journal</value>
-    <description>The path where the JournalNode daemon will store its local state. </description>
-  </property>
-
-  <!-- HDFS Short-Circuit Local Reads -->
-
-  <property>
-    <name>dfs.client.read.shortcircuit</name>
-    <value>true</value>
-    <description>
-      This configuration parameter turns on short-circuit local reads.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.skip.checksum</name>
-    <value></value>
-    <description>Enable/disable skipping the checksum check</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-  </property>
-
-  <property>
-    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
-    <value>4096</value>
-    <description>
-      The DFSClient maintains a cache of recently opened file descriptors. This
-      parameter controls the size of that cache. Setting this higher will use
-      more file descriptors, but potentially provide better performance on
-      workloads involving lots of seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>dfs.namenode.name.dir.restore</name>
-    <value>true</value>
-    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
-      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
-  </property>
-
-</configuration>

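Note how dfs.datanode.address above does not hard-code a port; it references ambari.dfs.datanode.port through ${...} indirection. A minimal sketch of that substitution (not Hadoop's Configuration class, just an illustration):

import re

props = {
    "ambari.dfs.datanode.port": "50010",
    "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
}

def resolve(name):
    # Recursively expand ${other.property} references against the props dict.
    return re.sub(r"\$\{([^}]+)\}", lambda m: resolve(m.group(1)), props[name])

print(resolve("dfs.datanode.address"))  # 0.0.0.0:50010
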
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
deleted file mode 100644
index 3de6ce5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metainfo.xml
+++ /dev/null
@@ -1,152 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HDFS</name>
-      <comment>Apache Hadoop Distributed File System</comment>
-      <version>2.1.0.2.0.6.0</version>
-
-      <components>
-        <component>
-          <name>NAMENODE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/namenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/namenode.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>DATANODE</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/datanode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SECONDARY_NAMENODE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/snamenode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HDFS_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hdfs_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>JOURNALNODE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/journalnode.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZKFC</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/zkfc_slave.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-libhdfs</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-lzo-native</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>snappy-devel</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ambari-log4j</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>hdfs-site</config-type>
-        <config-type>hadoop-policy</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>


[07/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh
new file mode 100644
index 0000000..b23045e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_webui.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+checkurl () {
+  url=$1
+  curl $url -o /dev/null
+  echo $?
+}
+
+service=$1
+host=$2
+port=$3
+
+if [[ -z "$service" || -z "$host" || -z "$port" ]]; then
+  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name port";
+  exit 3;
+fi
+
+case "$service" in
+
+jobtracker) 
+    jtweburl="http://$host:$port"
+    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
+      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
+      exit 1;
+    fi
+    ;;
+namenode)
+    nnweburl="http://$host:$port"
+    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
+      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
+      exit 1;
+    fi
+    ;;
+jobhistory)
+    jhweburl="http://$host:$port/jobhistoryhome.jsp"
+    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
+      exit 1;
+    fi
+    ;;
+hbase)
+    hbaseweburl="http://$host:$port/master-status"
+    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
+      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
+      exit 1;
+    fi
+    ;;
+resourcemanager)
+    rmweburl="http://$host:$port/cluster"
+    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
+      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
+      exit 1;
+    fi
+    ;;
+historyserver2)
+    hsweburl="http://$host:$port/jobhistory"
+    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
+      exit 1;
+    fi
+    ;;
+*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
+   exit 3
+   ;;
+esac
+
+echo "OK: Successfully accessed $service Web UI"
+exit 0;

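check_webui.sh above follows the usual Nagios plugin convention: exit 0 is OK, 1 is WARNING, 3 is UNKNOWN. An illustrative invocation from Python; the host and port are invented:

import subprocess

result = subprocess.run(
    ["bash", "check_webui.sh", "namenode", "nn-host.example.com", "50070"],
    capture_output=True, text=True)
# 0 and "OK: ..." when the NameNode web UI answered, 1 and "WARNING: ..." otherwise.
print(result.returncode, result.stdout.strip())
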
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php
new file mode 100644
index 0000000..487eb43
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/hdp_nagios_init.php
@@ -0,0 +1,81 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Common functions called from other alerts
+ *
+ */
+ 
+ /*
+ * Function for kinit. If security is enabled and klist shows no ticket for this
+ * principal, a kinit call is made.
+ */
+  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
+    if($security_enabled === 'true') {
+    
+      $is_logined = is_logined($principal_name);
+      
+      if (!$is_logined)
+        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
+      else
+        $status = array(0, '');
+    } else {
+      $status = array(0, '');
+    }
+  
+    return $status;
+  }
+  
+  
+  /*
+  * Checks if the user is logged in to Kerberos
+  */
+  function is_logined($principal_name) {
+    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
+    $check_output =  shell_exec($check_cmd);
+    
+    if ($check_output)
+      return false;
+    else
+      return true;
+  }
+
+  /*
+  * Runs kinit command.
+  */
+  function kinit($kinit_path_local, $keytab_path, $principal_name) {
+    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
+    $kinit_output = shell_exec($init_cmd);
+    if ($kinit_output) 
+      $status = array(1, $kinit_output);
+    else
+      $status = array(0, '');
+      
+    return $status;
+  }
+
+  function logout() {
+    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
+      $status = true;
+    else
+      $status = false;
+      
+    return $status;
+  }
+ 
+ ?>
\ No newline at end of file

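The kinit_if_needed/is_logined pair above only shells out to klist and kinit. A rough Python equivalent, shown purely to illustrate the control flow (this is not code that ships with the stack):

import subprocess

def kinit_if_needed(security_enabled, kinit_path, keytab_path, principal):
    if security_enabled != "true":
        return 0, ""
    klist = subprocess.run(["klist"], capture_output=True, text=True)
    if principal in klist.stdout:        # a ticket for this principal already exists
        return 0, ""
    kinit = subprocess.run([kinit_path, "-kt", keytab_path, principal],
                           capture_output=True, text=True)
    return kinit.returncode, kinit.stdout + kinit.stderr
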
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py
new file mode 100644
index 0000000..964225e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/functions.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.libraries.script.config_dictionary import UnknownConfiguration
+
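+# Returns the port part of a "host:port" address; UnknownConfiguration values are passed through unchanged.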
+def get_port_from_url(address):
+  if not is_empty(address):
+    return address.split(':')[-1]
+  else:
+    return address
+  
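+# A value is treated as empty when Ambari reports it as UnknownConfiguration.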
+def is_empty(var):
+  return isinstance(var, UnknownConfiguration)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py
new file mode 100644
index 0000000..af09e87
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from nagios_server_config import nagios_server_config
+
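+# Writes the Nagios httpd config, creates the Nagios directories and renders the server object/config files.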
+def nagios():
+  import params
+
+  File( params.nagios_httpd_config_file,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    content = Template("nagios.conf.j2"),
+    mode   = 0644
+  )
+
+  # enable snmpd
+  Execute( "service snmpd start; chkconfig snmpd on",
+    path = "/usr/local/bin/:/bin/:/sbin/"
+  )
+  
+  Directory( params.conf_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+
+  Directory( [params.plugins_dir, params.nagios_obj_dir])
+
+  Directory( params.nagios_pid_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755,
+    recursive = True
+  )
+
+  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    recursive = True
+  )
+  
+  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755
+  )
+
+  nagios_server_config()
+
+  set_web_permisssions()
+
+  File( format("{conf_dir}/command.cfg"),
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+  
+  
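+# Creates the htpasswd entry for the Nagios web UI login and adds the web server user to the Nagios group.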
+def set_web_permisssions():
+  import params
+
+  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
+  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
+  Execute( cmd,
+    not_if = test
+  )
+
+  File( "/etc/nagios/htpasswd.users",
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode  = 0640
+  )
+
+  if System.get_instance().platform == "suse":
+    command = format("usermod -G {nagios_group} wwwrun")
+  else:
+    command = format("usermod -a -G {nagios_group} apache")
+  
+  Execute( command)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py
new file mode 100644
index 0000000..02685c7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from nagios import nagios
+from nagios_service import nagios_service
+
+         
+class NagiosServer(Script):
+  def install(self, env):
+    remove_conflicting_packages()
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    nagios()
+
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    self.configure(env) # regenerate configs on start, e.g. after security has been enabled
+    nagios_service(action='start')
+
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    
+    nagios_service(action='stop')
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nagios_pid_file)
+    
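+# Removes previously installed Nagios packages and add-ons that conflict with the stack-managed install.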
+def remove_conflicting_packages():  
+  Package( 'hdp_mon_nagios_addons',
+    action = "remove"
+  )
+
+  Package( 'nagios-plugins',
+    action = "remove"
+  )
+
+  Execute( "rpm -e --allmatches --nopostun nagios",
+    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+    ignore_failures = True 
+  )
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
+  stroutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
+  
+  NagiosServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  NagiosServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py
new file mode 100644
index 0000000..b3e639c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_server_config.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def nagios_server_config():
+  import params
+  
+  nagios_server_configfile( 'nagios.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'resource.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'hadoop-hosts.cfg')
+  nagios_server_configfile( 'hadoop-hostgroups.cfg')
+  nagios_server_configfile( 'hadoop-servicegroups.cfg')
+  nagios_server_configfile( 'hadoop-services.cfg')
+  nagios_server_configfile( 'hadoop-commands.cfg')
+  nagios_server_configfile( 'contacts.cfg')
+  
+  if System.get_instance().platform != "suse":
+    nagios_server_configfile( 'nagios',
+                              config_dir = '/etc/init.d/', 
+                              mode = 0755, 
+                              owner = 'root', 
+                              group = 'root'
+    )
+
+  nagios_server_check( 'check_cpu.pl')
+  nagios_server_check( 'check_datanode_storage.php')
+  nagios_server_check( 'check_aggregate.php')
+  nagios_server_check( 'check_hdfs_blocks.php')
+  nagios_server_check( 'check_hdfs_capacity.php')
+  nagios_server_check( 'check_rpcq_latency.php')
+  nagios_server_check( 'check_webui.sh')
+  nagios_server_check( 'check_name_dir_status.php')
+  nagios_server_check( 'check_oozie_status.sh')
+  nagios_server_check( 'check_templeton_status.sh')
+  nagios_server_check( 'check_hive_metastore_status.sh')
+  nagios_server_check( 'check_hue_status.sh')
+  nagios_server_check( 'check_mapred_local_dir_used.sh')
+  nagios_server_check( 'check_nodemanager_health.sh')
+  nagios_server_check( 'check_namenodes_ha.sh')
+  nagios_server_check( 'hdp_nagios_init.php')
+
+
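+# Renders a Nagios config template into config_dir; owner, group and directory default to the Nagios settings from params.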
+def nagios_server_configfile(
+  name,
+  owner = None,
+  group = None,
+  config_dir = None,
+  mode = None
+):
+  import params
+  owner = params.nagios_user if not owner else owner
+  group = params.user_group if not group else group
+  config_dir = params.nagios_obj_dir if not config_dir else config_dir
+  
+  TemplateConfig( format("{config_dir}/{name}"),
+    owner          = owner,
+    group          = group,
+    mode           = mode
+  )
+
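+# Installs a bundled check script into the plugins directory with execute permission.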
+def nagios_server_check(name):
+  File( format("{plugins_dir}/{name}"),
+    content = StaticFile(name), 
+    mode = 0755
+  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py
new file mode 100644
index 0000000..cc411b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/nagios_service.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
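+# Starts or stops the Nagios daemon and restarts the monitoring web server afterwards.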
+def nagios_service(action='start'): # start or stop
+  import params
+
+  if action == 'start':
+   command = "service nagios start"
+  elif action == 'stop':
+   command = format("service nagios stop && rm -f {nagios_pid_file}")
+
+  Execute( command,
+     path    = "/usr/local/bin/:/bin/:/sbin/"      
+  )
+  MonitorWebserver("restart")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py
new file mode 100644
index 0000000..bd7135e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/params.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from functions import get_port_from_url
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/nagios"
+nagios_var_dir = "/var/nagios"
+nagios_rw_dir = "/var/nagios/rw"
+plugins_dir = "/usr/lib64/nagios/plugins"
+nagios_obj_dir = "/etc/nagios/objects"
+check_result_path = "/var/nagios/spool/checkresults"
+nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
+nagios_log_dir = "/var/log/nagios"
+nagios_log_archives_dir = format("{nagios_log_dir}/archives")
+nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
+nagios_lookup_daemon_str = "/usr/sbin/nagios"
+nagios_pid_dir = status_params.nagios_pid_dir
+nagios_pid_file = status_params.nagios_pid_file
+nagios_resource_cfg = format("{conf_dir}/resource.cfg")
+nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
+nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
+nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
+nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
+eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
+nagios_principal_name = default("nagios_principal_name", "nagios")
+hadoop_ssl_enabled = False
+
+namenode_metadata_port = "8020"
+oozie_server_port = "11000"
+# different to HDP1    
+namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
+# different to HDP1  
+snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.namenode.secondary.http-address"])
+
+hbase_master_rpc_port = "60000"
+rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
+nm_port = "8042"
+hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'])
+journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
+datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
+flume_port = "4159"
+hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
+templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
+hbase_rs_port = "60030"
+
+# this is different for HDP1
+nn_metrics_property = "FSNamesystem"
+clientPort = config['configurations']['global']['clientPort'] #ZK 
+
+
+java64_home = config['hostLevelParams']['java_home']
+security_enabled = config['configurations']['global']['security_enabled']
+
+nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
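+# NameNode HA is considered enabled when more than one NameNode id is configured for the nameservice.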
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+ganglia_port = "8651"
+ganglia_collector_slaves_port = "8660"
+ganglia_collector_namenode_port = "8661"
+ganglia_collector_jobtracker_port = "8662"
+ganglia_collector_hbase_port = "8663"
+ganglia_collector_rm_port = "8664"
+ganglia_collector_nm_port = "8660"
+ganglia_collector_hs_port = "8666"
+  
+all_ping_ports = config['clusterHostInfo']['all_ping_ports']
+
+if System.get_instance().platform == "suse":
+  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
+  htpasswd_cmd = "htpasswd2"
+else:
+  nagios_p1_pl = "/usr/bin/p1.pl"
+  htpasswd_cmd = "htpasswd"
+  
+nagios_user = config['configurations']['global']['nagios_user']
+nagios_group = config['configurations']['global']['nagios_group']
+nagios_web_login = config['configurations']['global']['nagios_web_login']
+nagios_web_password = config['configurations']['global']['nagios_web_password']
+user_group = config['configurations']['global']['user_group']
+nagios_contact = config['configurations']['global']['nagios_contact']
+
+namenode_host = default("/clusterHostInfo/namenode_host", None)
+_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
+_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
+_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
+_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
+_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
+_rm_host = default("/clusterHostInfo/rm_host", None)
+_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
+_hs_host = default("/clusterHostInfo/hs_host", None)
+_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
+_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
+_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
+_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
+
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
+_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
+_oozie_server = default("/clusterHostInfo/oozie_server",None)
+_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
+# can differ on HDP1
+#_mapred_tt_hosts = _slave_hosts
+# if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
+_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
+_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
+all_hosts = config['clusterHostInfo']['all_hosts']
+
+
+hostgroup_defs = {
+    'namenode' : namenode_host,
+    'snamenode' : _snamenode_host,
+    'slaves' : _slave_hosts,
+    # HDP1
+    #'tasktracker-servers' : _mapred_tt_hosts,
+    'agent-servers' : all_hosts,
+    'nagios-server' : _nagios_server_host,
+    'jobtracker' : _jtnode_host,
+    'ganglia-server' : _ganglia_server_host,
+    'flume-servers' : _flume_hosts,
+    'zookeeper-servers' : _zookeeper_hosts,
+    'hbasemasters' : hbase_master_hosts,
+    'hiveserver' : _hive_server_host,
+    'region-servers' : _hbase_rs_hosts,
+    'oozie-server' : _oozie_server,
+    'webhcat-server' : _webhcat_server_host,
+    'hue-server' : _hue_server_host,
+    'resourcemanager' : _rm_host,
+    'nodemanagers' : _nm_hosts,
+    'historyserver2' : _hs_host,
+    'journalnodes' : _journalnode_hosts
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py
new file mode 100644
index 0000000..33b35fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+nagios_pid_dir = "/var/run/nagios"
+nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2
new file mode 100644
index 0000000..9dada51
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/contacts.cfg.j2
@@ -0,0 +1,91 @@
+###############################################################################
+# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
+#
+# Last Modified: 05-31-2007
+#
+# NOTES: This config file provides you with some example contact and contact
+#        group definitions that you can reference in host and service
+#        definitions.
+#       
+#        You don't need to keep these definitions in a separate file from your
+#        other object definitions.  This has been done just to make things
+#        easier to understand.
+#
+###############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+
+###############################################################################
+###############################################################################
+#
+# CONTACTS
+#
+###############################################################################
+###############################################################################
+
+# Just one contact defined by default - the Nagios admin (that's you)
+# This contact definition inherits a lot of default values from the 'generic-contact' 
+# template which is defined elsewhere.
+
+define contact{
+        contact_name    {{nagios_web_login}}                                        ; Short name of user
+        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
+        alias           Nagios Admin                                                ; Full name of user
+
+        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
+        }
+
+# Contact which writes all Nagios alerts to the system logger.
+define contact{
+        contact_name                    sys_logger         ; Short name of user
+        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
+        alias                           System Logger      ; Full name of user
+        host_notifications_enabled      1
+        service_notifications_enabled   1
+        service_notification_period     24x7
+        host_notification_period        24x7
+        service_notification_options    w,u,c,r,s
+        host_notification_options       d,u,r,s
+        can_submit_commands             1
+        retain_status_information       1
+        service_notification_commands   service_sys_logger
+        host_notification_commands      host_sys_logger
+        }
+
+###############################################################################
+###############################################################################
+#
+# CONTACT GROUPS
+#
+###############################################################################
+###############################################################################
+
+# We only have one contact in this simple configuration file, so there is
+# no need to create more than one contact group.
+
+define contactgroup {
+        contactgroup_name       admins
+        alias                   Nagios Administrators
+        members                 {{nagios_web_login}},sys_logger
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
new file mode 100644
index 0000000..e47a09e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
@@ -0,0 +1,114 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% if env.system.platform != "suse" %}
+# 'check_cpu' check remote cpu load
+define command {
+        command_name    check_cpu
+        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
+       }
+{% endif %}
+
+# Check data node storage full 
+define command {
+        command_name    check_datanode_storage
+        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
+       }
+
+define command{
+        command_name    check_hdfs_blocks
+        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
+       }
+
+define command{
+        command_name    check_hdfs_capacity
+        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_aggregate
+        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_rpcq_latency
+        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_nagios
+        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
+       }
+
+define command{
+        command_name    check_webui
+        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
+       }
+
+define command{
+        command_name    check_name_dir_status
+        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
+       }
+
+define command{
+        command_name    check_oozie_status
+        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_templeton_status
+        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_hive_metastore_status
+        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+define command{
+        command_name    check_hue_status
+        command_line    $USER1$/check_hue_status.sh
+       }
+
+define command{
+       command_name    check_mapred_local_dir_used_space
+       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
+       }
+
+define command{
+       command_name    check_namenodes_ha
+       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
+       }
+
+define command{
+        command_name    check_nodemanager_health
+        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
+       }
+
+define command{
+        command_name    host_sys_logger
+        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
+       }
+
+define command{
+        command_name    service_sys_logger
+        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
+       }
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
new file mode 100644
index 0000000..2bcbf7c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
@@ -0,0 +1,15 @@
+{% for name, hosts in hostgroup_defs.iteritems() %}
+{% if hosts %}
+define hostgroup {
+        hostgroup_name  {{name}}
+        alias           {{name}}
+        members         {{','.join(hosts)}}
+}
+{% endif %}
+{% endfor %}
+
+define hostgroup {
+        hostgroup_name  all-servers
+        alias           All Servers
+        members         {{','.join(all_hosts)}}
+}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
new file mode 100644
index 0000000..62555d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
@@ -0,0 +1,16 @@
+{% for host in all_hosts %}
+define host {
+        alias        {{host}}
+        host_name    {{host}}
+        use          linux-server
+        address      {{host}}
+        check_interval         0.25
+        retry_interval         0.25
+        max_check_attempts     4
+        notifications_enabled     1
+        first_notification_delay  0     # Send notification soon after change in the hard state
+        notification_interval     0     # Send the notification once
+        notification_options      d,u,r
+}
+
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
new file mode 100644
index 0000000..0101ce6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
@@ -0,0 +1,80 @@
+{% if hostgroup_defs['namenode'] or
+  hostgroup_defs['snamenode']  or
+  hostgroup_defs['slaves'] %}
+define servicegroup {
+  servicegroup_name  HDFS
+  alias  HDFS Checks
+}
+{% endif %}
+{%if hostgroup_defs['jobtracker'] or
+  hostgroup_defs['historyserver2']-%}
+define servicegroup {
+  servicegroup_name  MAPREDUCE
+  alias  MAPREDUCE Checks
+}
+{% endif %}
+{%if hostgroup_defs['resourcemanager'] or
+  hostgroup_defs['nodemanagers'] %}
+define servicegroup {
+  servicegroup_name  YARN
+  alias  YARN Checks
+}
+{% endif %}
+{%if hostgroup_defs['flume-servers'] %}
+define servicegroup {
+  servicegroup_name  FLUME
+  alias  FLUME Checks
+}
+{% endif %}
+{%if hostgroup_defs['hbasemasters'] %}
+define servicegroup {
+  servicegroup_name  HBASE
+  alias  HBASE Checks
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+define servicegroup {
+  servicegroup_name  OOZIE
+  alias  OOZIE Checks
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+define servicegroup {
+  servicegroup_name  WEBHCAT
+  alias  WEBHCAT Checks
+}
+{% endif %}
+{% if hostgroup_defs['nagios-server'] %}
+define servicegroup {
+  servicegroup_name  NAGIOS
+  alias  NAGIOS Checks
+}
+{% endif %}
+{% if hostgroup_defs['ganglia-server'] %}
+define servicegroup {
+  servicegroup_name  GANGLIA
+  alias  GANGLIA Checks
+}
+{% endif %}
+{% if hostgroup_defs['hiveserver'] %}
+define servicegroup {
+  servicegroup_name  HIVE-METASTORE
+  alias  HIVE-METASTORE Checks
+}
+{% endif %}
+{% if hostgroup_defs['zookeeper-servers'] %}
+define servicegroup {
+  servicegroup_name  ZOOKEEPER
+  alias  ZOOKEEPER Checks
+}
+{% endif %}
+define servicegroup {
+  servicegroup_name  AMBARI
+  alias  AMBARI Checks
+}
+{% if hostgroup_defs['hue-server'] %}
+define servicegroup {
+  servicegroup_name  HUE
+  alias  HUE Checks
+}
+{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2
new file mode 100644
index 0000000..5941c15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/templates/hadoop-services.cfg.j2
@@ -0,0 +1,643 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+{# TODO: Look for { or } in created file #}
+# NAGIOS SERVER Check (status log update)
+{% if hostgroup_defs['nagios-server'] %}
+define service {
+        name                            hadoop-service
+        use                             generic-service
+        notification_options            w,u,c,r,f,s
+        first_notification_delay        0
+        notification_interval           0                 # Send the notification once
+        contact_groups                  admins
+        notifications_enabled           1
+        event_handler_enabled           1
+        register                        0
+}
+
+define service {        
+        hostgroup_name          nagios-server        
+        use                     hadoop-service
+        service_description     NAGIOS::Nagios status log freshness
+        servicegroups           NAGIOS
+        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
+        normal_check_interval   5
+        retry_check_interval    0.5
+        max_check_attempts      2
+}
+
+# NAGIOS SERVER HDFS Checks
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes with space available
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{# used only for HDP2 #}
+{% if hostgroup_defs['namenode'] and dfs_ha_enabled %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::NameNode HA Healthy
+        servicegroups           HDFS
+        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      5
+}
+{% endif %}
+
+# AMBARI AGENT Checks
+{% for hostname in all_hosts %}
+define service {
+        host_name	        {{ hostname }}
+        use                     hadoop-service
+        service_description     AMBARI::Ambari Agent process
+        servicegroups           AMBARI
+        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% endfor %}
+
+# NAGIOS SERVER ZOOKEEPER Checks
+{% if hostgroup_defs['zookeeper-servers'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
+        servicegroups           ZOOKEEPER
+        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+# NAGIOS SERVER HBASE Checks
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HBASE::Percent RegionServers live
+        servicegroups           HBASE
+        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+
+
+# GANGLIA SERVER Checks
+{% if hostgroup_defs['ganglia-server'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Server process
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for NameNode
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% if hostgroup_defs['jobtracker'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for JobTracker
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HBase Master
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['resourcemanager'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% endif %}
+
+{% if hostgroup_defs['snamenode'] %}
+# Secondary namenode checks
+define service {
+        hostgroup_name          snamenode
+        use                     hadoop-service
+        service_description     NAMENODE::Secondary NameNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['namenode'] %}
+# HDFS Checks
+{%  for namenode_hostname in namenode_host %}
+{# TODO: check if we can get rid of str, lower #}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if env.system.platform != "suse" %}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_webui!namenode!{{ namenode_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      5
+}
+
+{%  endfor  %}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Blocks health
+        servicegroups           HDFS
+        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::HDFS capacity utilization
+        servicegroups           HDFS
+        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   10
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+{% endif %}
+
+# MAPREDUCE Checks
+{# On HDP1, jobtracker and tasktracker alerts go here #}
+
+{% if hostgroup_defs['resourcemanager'] %}
+# YARN::RESOURCEMANAGER Checks 
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager Web UI
+        servicegroups           YARN
+        check_command           check_webui!resourcemanager!{{ rm_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.platform != "suse" %}
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
+        servicegroups           YARN
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager RPC latency
+        servicegroups           YARN
+        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['nodemanagers'] %}
+# YARN::NODEMANAGER Checks
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager health
+        servicegroups           YARN
+        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     NODEMANAGER::Percent NodeManagers live
+        servicegroups           YARN
+        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+# MAPREDUCE::JOBHISTORY Checks
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!historyserver2!{{ hs_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.platform != "suse" %}
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer CPU utilization
+        servicegroups           MAPREDUCE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer RPC latency
+        servicegroups           MAPREDUCE
+        check_command           check_rpcq_latency!JobHistoryServer!{{ hs_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{%  endif %}
+
+{% if hostgroup_defs['journalnodes'] %}
+# Journalnode checks
+define service {
+        hostgroup_name          journalnodes
+        use                     hadoop-service
+        service_description     JOURNALNODE::JournalNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if dfs_ha_enabled %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent JournalNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+{% if hostgroup_defs['slaves'] %}
+# HDFS::DATANODE Checks
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode space
+        servicegroups           HDFS
+        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      2
+}
+
+{% endif %}
+
+{% if hostgroup_defs['flume-servers'] %}
+# FLUME Checks
+define service {
+        hostgroup_name          flume-servers
+        use                     hadoop-service
+        service_description     FLUME::Flume Agent process
+        servicegroups           FLUME
+        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['zookeeper-servers'] %}
+# ZOOKEEPER Checks
+define service {
+        hostgroup_name          zookeeper-servers
+        use                     hadoop-service
+        service_description     ZOOKEEPER::ZooKeeper Server process
+        servicegroups           ZOOKEEPER
+        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+# HBASE::REGIONSERVER Checks
+define service {
+        hostgroup_name          region-servers
+        use                     hadoop-service
+        service_description     REGIONSERVER::RegionServer process
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{# HBASE:: MASTER Checks
+# define service {
+#         hostgroup_name          hbasemasters
+#         use                     hadoop-service
+#         service_description     HBASEMASTER::HBase Master Web UI
+#         servicegroups           HBASE
+#         check_command           check_webui!hbase!{{ hbase_master_port }}
+#         normal_check_interval   1
+#         retry_check_interval    1
+#         max_check_attempts      3
+# #}
+{%  for hbasemaster in hbase_master_hosts  %}
+{% if env.system.platform != "suse" %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endfor %}
+{% endif %}
+
+{% if hostgroup_defs['hiveserver'] %}
+# HIVE Metastore check
+define service {
+        hostgroup_name          hiveserver
+        use                     hadoop-service
+        service_description     HIVE-METASTORE::Hive Metastore status
+        servicegroups           HIVE-METASTORE
+        {% if security_enabled %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+# Oozie check
+define service {
+        hostgroup_name          oozie-server
+        use                     hadoop-service
+        service_description     OOZIE::Oozie Server status
+        servicegroups           OOZIE
+        {% if security_enabled %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+# WEBHCAT check
+define service {
+        hostgroup_name          webhcat-server
+        use                     hadoop-service
+        service_description     WEBHCAT::WebHCat Server status
+        servicegroups           WEBHCAT 
+        {% if security_enabled %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hue-server'] %}
+define service {
+        hostgroup_name          hue-server
+        use                     hadoop-service
+        service_description     HUE::Hue Server status
+        servicegroups           HUE
+        check_command           check_hue_status
+        normal_check_interval   100
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
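
The {{ ... }} tokens in the template above are Jinja2 placeholders that Ambari fills in from cluster configuration when it generates the Nagios config. As a rough standalone illustration (using the jinja2 library directly and a hypothetical clientPort of 2181, not Ambari's actual rendering path), the ZooKeeper check above expands like this:

from jinja2 import Template

# Standalone illustration of how one service block above gets rendered.
# clientPort=2181 is a hypothetical example value, not taken from this commit.
zk_check = Template("""define service {
        hostgroup_name          zookeeper-servers
        use                     hadoop-service
        service_description     ZOOKEEPER::ZooKeeper Server process
        servicegroups           ZOOKEEPER
        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
        normal_check_interval   1
        retry_check_interval    0.5
        max_check_attempts      3
}""")

print(zk_check.render(clientPort=2181))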


[18/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/storm.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/storm.py
deleted file mode 100644
index d38909c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/storm.py
+++ /dev/null
@@ -1,39 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from yaml_config import yaml_config
-import sys
-
-def storm():
-  import params
-
-  Directory([params.log_dir, params.pid_dir, params.local_dir],
-            owner=params.storm_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  yaml_config( "storm.yaml",
-               conf_dir = params.conf_dir,
-               configurations = params.config['configurations']['storm-site'],
-               owner = params.storm_user,
-               group = params.user_group
-  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/supervisor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/supervisor.py
deleted file mode 100644
index eafb48f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/supervisor.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from yaml_config import yaml_config
-from storm import storm
-from service import service
-
-
-class Supervisor(Script):
-  def install(self, env):
-    self.install_packages(env)
-    # TODO remove
-    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
-            ignore_failures = True)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    storm()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-    service("supervisor", action="start")
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service("supervisor", action="stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-
-    check_process_status(status_params.pid_supervisor)
-
-
-if __name__ == "__main__":
-  Supervisor().execute()
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/ui_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/ui_server.py
deleted file mode 100644
index 58deec6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/ui_server.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from storm import storm
-from service import service
-from service_check import ServiceCheck
-
-
-class UiServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    # TODO remove
-    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
-            ignore_failures = True)
-
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    storm()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-    service("ui", action="start")
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service("ui", action="stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_ui)
-
-if __name__ == "__main__":
-  UiServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/yaml_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/yaml_config.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/yaml_config.py
deleted file mode 100644
index 1f56486..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/yaml_config.py
+++ /dev/null
@@ -1,49 +0,0 @@
-import re
-from resource_management import *
-
-def escape_yaml_propetry(value):
-  unquouted = False
-  unquouted_values = ["null","Null","NULL","true","True","TRUE","false","False","FALSE","YES","Yes","yes","NO","No","no","ON","On","on","OFF","Off","off"]
-  
-  if value in unquouted_values:
-    unquouted = True
-
-  # if is list [a,b,c]
-  if re.match('^\w*\[.+\]\w*$', value):
-    unquouted = True
-    
-  try:
-    int(value)
-    unquouted = True
-  except ValueError:
-    pass
-  
-  try:
-    float(value)
-    unquouted = True
-  except ValueError:
-    pass
-  
-  if not unquouted:
-    value = value.replace("'","''")
-    value = "'"+value+"'"
-    
-  return value
-
-def yaml_config(
-  filename,
-  configurations = None,
-  conf_dir = None,
-  mode = None,
-  owner = None,
-  group = None
-):
-    config_content = InlineTemplate('''{% for key, value in configurations_dict.items() %}{{ key }}: {{ escape_yaml_propetry(value) }}
-{% endfor %}''', configurations_dict=configurations, extra_imports=[escape_yaml_propetry])
-
-    File (format("{conf_dir}/{filename}"),
-      content = config_content,
-      owner = owner,
-      group = group,
-      mode = mode
-    )
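
The removed escape_yaml_propetry helper above leaves a value unquoted only when it is a recognized boolean/null literal, a bracketed list, or something that parses as a number; anything else is wrapped in single quotes with embedded quotes doubled. A minimal standalone sketch of that rule (plain Python, without the resource_management/InlineTemplate machinery, and with the case handling simplified) is:

import re

# Lower-cased equivalents of the literal list in the removed helper.
UNQUOTED_LITERALS = {"null", "true", "false", "yes", "no", "on", "off"}

def quote_for_yaml(value):
    # Booleans/null literals and bracketed lists stay unquoted.
    if value.lower() in UNQUOTED_LITERALS or re.match(r'^\w*\[.+\]\w*$', value):
        return value
    # Numbers (ints and floats) stay unquoted too.
    try:
        float(value)
        return value
    except ValueError:
        pass
    # Everything else is single-quoted, with embedded quotes doubled.
    return "'" + value.replace("'", "''") + "'"

# quote_for_yaml("TRUE")  -> "TRUE"
# quote_for_yaml("2181")  -> "2181"
# quote_for_yaml("it's")  -> "'it''s'"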

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/configuration/webhcat-site.xml
deleted file mode 100644
index 39b901e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/configuration/webhcat-site.xml
+++ /dev/null
@@ -1,126 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!-- 
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<!-- The default settings for Templeton. -->
-<!-- Edit templeton-site.xml to change settings for your local -->
-<!-- install. -->
-
-<configuration>
-
-  <property>
-    <name>templeton.port</name>
-      <value>50111</value>
-    <description>The HTTP port for the main server.</description>
-  </property>
-
-  <property>
-    <name>templeton.hadoop.conf.dir</name>
-    <value>/etc/hadoop/conf</value>
-    <description>The path to the Hadoop configuration.</description>
-  </property>
-
-  <property>
-    <name>templeton.jar</name>
-    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
-    <description>The path to the Templeton jar file.</description>
-  </property>
-
-  <property>
-    <name>templeton.libjars</name>
-    <value>/usr/lib/zookeeper/zookeeper.jar</value>
-    <description>Jars to add to the classpath.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.hadoop</name>
-    <value>/usr/bin/hadoop</value>
-    <description>The path to the Hadoop executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.archive</name>
-    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
-    <description>The path to the Pig archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.pig.path</name>
-    <value>pig.tar.gz/pig/bin/pig</value>
-    <description>The path to the Pig executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hcat</name>
-    <value>/usr/bin/hcat</value>
-    <description>The path to the hcatalog executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.archive</name>
-    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
-    <description>The path to the Hive archive.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.path</name>
-    <value>hive.tar.gz/hive/bin/hive</value>
-    <description>The path to the Hive executable.</description>
-  </property>
-
-  <property>
-    <name>templeton.hive.properties</name>
-    <value></value>
-    <description>Properties to set when running hive.</description>
-  </property>
-
-
-  <property>
-    <name>templeton.zookeeper.hosts</name>
-    <value>localhost:2181</value>
-    <description>ZooKeeper servers, as comma separated host:port pairs</description>
-  </property>
-
-  <property>
-    <name>templeton.storage.class</name>
-    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
-    <description>The class to use as storage</description>
-  </property>
-
-  <property>
-   <name>templeton.override.enabled</name>
-   <value>false</value>
-   <description>
-     Enable the override path in templeton.override.jars
-   </description>
- </property>
-
- <property>
-    <name>templeton.streaming.jar</name>
-    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
-    <description>The hdfs path to the Hadoop streaming jar file.</description>
-  </property> 
-
-  <property>
-    <name>templeton.exec.timeout</name>
-    <value>60000</value>
-    <description>Time out for templeton api</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/metainfo.xml
deleted file mode 100644
index e9acd52..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/metainfo.xml
+++ /dev/null
@@ -1,65 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>WEBHCAT</name>
-      <comment>This is comment for WEBHCAT service</comment>
-      <version>0.12.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>WEBHCAT_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/webhcat_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>webhcat-tar-pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-      <configuration-dependencies>
-        <config-type>webhcat-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/files/templetonSmoke.sh
deleted file mode 100644
index cefc4f0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/files/templetonSmoke.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export ttonhost=$1
-export smoke_test_user=$2
-export smoke_user_keytab=$3
-export security_enabled=$4
-export kinit_path_local=$5
-export ttonurl="http://${ttonhost}:50111/templeton/v1"
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else
-  kinitcmd=""
-fi
-
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
-
-#try hcat ddl command
-echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
-cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit  1
-fi
-
-# NOT SURE?? SUHAS
-if [[ $security_enabled == "true" ]]; then
-  echo "Templeton Pig Smoke Tests not run in secure mode"
-  exit 0
-fi
-
-#try pig query
-outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
-ttonTestOutput="/tmp/idtest.${outname}.out";
-ttonTestInput="/tmp/idtest.${outname}.in";
-ttonTestScript="idtest.${outname}.pig"
-
-echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
-echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
-echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
-
-#copy pig script to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
-
-#copy input file to hdfs
-su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
-
-#create, copy post args file
-echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
-
-#submit pig query
-cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
-retVal=`su - ${smoke_test_user} -c "$cmd"`
-httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
-if [[ "$httpExitCode" -ne "200" ]] ; then
-  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
-  export TEMPLETON_EXIT_CODE=1
-  exit 1
-fi
-
-exit 0
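
At its core, the removed smoke test kinits if security is enabled and then curls WebHCat's /templeton/v1/status endpoint, treating anything other than HTTP 200 as a failure. A rough unsecured equivalent of that status probe in plain Python 3 (standard library only; the host and timeout defaults are hypothetical) is:

import urllib.request

def webhcat_status_ok(host="localhost", port=50111, timeout=10):
    # Mirrors the curl against $ttonurl/status above: success means HTTP 200.
    url = "http://%s:%d/templeton/v1/status" % (host, port)
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return resp.status == 200
    except OSError:
        return False

if __name__ == "__main__":
    raise SystemExit(0 if webhcat_status_ok() else 1)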

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/params.py
deleted file mode 100644
index 60b52a7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/params.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-webhcat_user = config['configurations']['global']['webhcat_user']
-download_url = config['configurations']['global']['apache_artifacts_download_url']
-
-config_dir = '/etc/hcatalog/conf'
-
-templeton_log_dir = config['configurations']['global']['hcat_log_dir']
-templeton_pid_dir = status_params.templeton_pid_dir
-
-pid_file = status_params.pid_file
-
-hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
-templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
-
-hadoop_home = '/usr'
-user_group = config['configurations']['global']['user_group']
-
-webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
-
-webhcat_apps_dir = "/apps/webhcat"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/service_check.py
deleted file mode 100644
index 58b4d25..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/service_check.py
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-class WebHCatServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File('/tmp/templetonSmoke.sh',
-         content= StaticFile('templetonSmoke.sh'),
-         mode=0755
-    )
-
-    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
-                 " {security_enabled} {kinit_path_local}",
-                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True)
-
-if __name__ == "__main__":
-  WebHCatServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/status_params.py
deleted file mode 100644
index 21dde6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
-pid_file = format('{templeton_pid_dir}/webhcat.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat.py
deleted file mode 100644
index c013624..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat.py
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-import sys
-
-
-def webhcat():
-  import params
-
-  Directory(params.templeton_pid_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.templeton_log_dir,
-            owner=params.webhcat_user,
-            mode=0755,
-            group=params.user_group,
-            recursive=True)
-
-  Directory(params.config_dir,
-            owner=params.webhcat_user,
-            group=params.user_group)
-
-  XmlConfig("webhcat-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['webhcat-site'],
-            owner=params.webhcat_user,
-            group=params.user_group,
-  )
-
-  File(format("{config_dir}/webhcat-env.sh"),
-       owner=params.webhcat_user,
-       group=params.user_group,
-       content=Template('webhcat-env.sh.j2')
-  )
-
-  if params.security_enabled:
-    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-  else:
-    kinit_if_needed = ""
-
-  if kinit_if_needed:
-    Execute(kinit_if_needed,
-            user=params.webhcat_user,
-            path='/bin'
-    )
-
-  copyFromLocal(path='/usr/lib/hadoop-mapreduce/hadoop-streaming*.jar',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
-                kinnit_if_needed=kinit_if_needed
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
-  )
-
-  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
-                owner=params.webhcat_user,
-                mode=0755,
-                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
-  )
-
-
-def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinnit_if_needed=""):
-  import params
-
-  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
-  unless_cmd = format("{kinnit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
-
-  ExecuteHadoop(copy_cmd,
-                not_if=unless_cmd,
-                user=owner,
-                conf_dir=params.hadoop_conf_dir)
-
-  if not owner:
-    chown = None
-  else:
-    if not group:
-      chown = owner
-    else:
-      chown = format('{owner}:{group}')
-
-  if not chown:
-    chown_cmd = format("fs -chown {chown} {dest_dir}")
-
-    ExecuteHadoop(copy_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
-
-  if not mode:
-    chmod_cmd = format('fs -chmod {mode} {dest_dir}')
-
-    ExecuteHadoop(chmod_cmd,
-                  user=owner,
-                  conf_dir=params.hadoop_conf_dir)
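
The copyFromLocal helper removed above looks buggy as written: the "if not owner", "if not chown" and "if not mode" tests are inverted, and the chown branch re-runs copy_cmd instead of the chown command it just built. A standalone sketch of the flow it appears to intend (copy into HDFS unless the destination already exists, then chown/chmod when requested), using the plain hadoop CLI via subprocess rather than Ambari's ExecuteHadoop resource, might look like:

import subprocess

def copy_from_local(src, dest_dir, owner=None, group=None, mode=None, hadoop="hadoop"):
    """Sketch only: copy a local file into HDFS, then set ownership/permissions."""
    def hdfs(*args):
        subprocess.check_call([hadoop, "fs"] + list(args))

    # Skip the copy if the destination already exists (mirrors the not_if test above).
    exists = subprocess.call([hadoop, "fs", "-ls", dest_dir],
                             stdout=subprocess.DEVNULL,
                             stderr=subprocess.DEVNULL) == 0
    if not exists:
        hdfs("-copyFromLocal", src, dest_dir)

    if owner:
        hdfs("-chown", owner if not group else "%s:%s" % (owner, group), dest_dir)
    if mode:
        hdfs("-chmod", str(mode), dest_dir)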

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_server.py
deleted file mode 100644
index 4365111..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_server.py
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from webhcat import webhcat
-from webhcat_service import webhcat_service
-
-class WebHCatServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    webhcat()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    webhcat_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    webhcat_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-
-if __name__ == "__main__":
-  WebHCatServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_service.py
deleted file mode 100644
index 12c3854..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/scripts/webhcat_service.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management import *
-
-def webhcat_service(action='start'):
-  import params
-
-  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
-
-  if action == 'start':
-    demon_cmd = format('{cmd} start')
-    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
-    Execute(demon_cmd,
-            user=params.webhcat_user,
-            not_if=no_op_test
-    )
-  elif action == 'stop':
-    demon_cmd = format('{cmd} stop')
-    Execute(demon_cmd,
-            user=params.webhcat_user
-    )
-    Execute(format('rm -f {pid_file}'))

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/templates/webhcat-env.sh.j2
deleted file mode 100644
index 9ea4a79..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/WEBHCAT/package/templates/webhcat-env.sh.j2
+++ /dev/null
@@ -1,44 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The file containing the running pid
-PID_FILE={{pid_file}}
-
-TEMPLETON_LOG_DIR={{templeton_log_dir}}/
-
-
-WEBHCAT_LOG_DIR={{templeton_log_dir}}/
-
-# The console error log
-ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
-
-# The console log
-CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
-
-#TEMPLETON_JAR=templeton_jar_name
-
-#HADOOP_PREFIX=hadoop_prefix
-
-#HCAT_PREFIX=hive_prefix
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-export HADOOP_HOME=/usr/lib/hadoop

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/capacity-scheduler.xml
deleted file mode 100644
index 4a19779..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/capacity-scheduler.xml
+++ /dev/null
@@ -1,128 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-applications</name>
-    <value>10000</value>
-    <description>
-      Maximum number of applications that can be pending and running.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
-    <value>0.2</value>
-    <description>
-      Maximum percent of resources in the cluster which can be used to run 
-      application masters i.e. controls number of concurrent running
-      applications.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.queues</name>
-    <value>default</value>
-    <description>
-      The queues at this level (root is the root queue).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.capacity</name>
-    <value>100</value>
-    <description>
-      The total capacity as a percentage out of 100 for this queue.
-      If it has child queues then this includes their capacity as well.
-      The child queues capacity should add up to their parent queue's capacity
-      or less.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.capacity</name>
-    <value>100</value>
-    <description>Default queue target capacity.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
-    <value>1</value>
-    <description>
-      Default queue user limit a percentage from 0.0 to 1.0.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
-    <value>100</value>
-    <description>
-      The maximum capacity of the default queue. 
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.state</name>
-    <value>RUNNING</value>
-    <description>
-      The state of the default queue. State can be one of RUNNING or STOPPED.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can submit jobs to the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
-    <value>*</value>
-    <description>
-      The ACL of who can administer jobs on the default queue.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
-    <value>*</value>
-    <description>
-      The ACL for who can administer this queue i.e. change sub-queue 
-      allocations.
-    </description>
-  </property>
-  
-  <property>
-    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
-    <value>50</value>
-    <description>
-      No description
-    </description>
-  </property>
-  <property>
-    <name>yarn.scheduler.capacity.node-locality-delay</name>
-    <value>40</value>
-    <description>
-      No description
-    </description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/core-site.xml
deleted file mode 100644
index 3a2af49..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/core-site.xml
+++ /dev/null
@@ -1,20 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<configuration>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/global.xml
deleted file mode 100644
index 429c39f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/global.xml
+++ /dev/null
@@ -1,88 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>rm_host</name>
-    <value></value>
-    <description>ResourceManager.</description>
-  </property>
-  <property>
-    <name>nm_hosts</name>
-    <value></value>
-    <description>List of NodeManager Hosts.</description>
-  </property>
-  <property>
-    <name>yarn_log_dir_prefix</name>
-    <value>/var/log/hadoop-yarn</value>
-    <description>YARN Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_pid_dir_prefix</name>
-    <value>/var/run/hadoop-yarn</value>
-    <description>YARN PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>yarn_user</name>
-    <value>yarn</value>
-    <description>YARN User</description>
-  </property>
-  <property>
-    <name>yarn_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for all YARN components using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>resourcemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for ResourceManager using a numerical value in the scale of MB</description>
-  </property>
-  <property>
-    <name>nodemanager_heapsize</name>
-    <value>1024</value>
-    <description>Max heapsize for NodeManager using a numerical value in the scale of MB</description>
-  </property>
-
-  <!--MAPREDUCE2-->
-
-  <property>
-    <name>hs_host</name>
-    <value></value>
-    <description>History Server.</description>
-  </property>
-  <property>
-    <name>mapred_log_dir_prefix</name>
-    <value>/var/log/hadoop-mapreduce</value>
-    <description>Mapreduce Log Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_pid_dir_prefix</name>
-    <value>/var/run/hadoop-mapreduce</value>
-    <description>Mapreduce PID Dir Prefix</description>
-  </property>
-  <property>
-    <name>mapred_user</name>
-    <value>mapred</value>
-    <description>Mapreduce User</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-queue-acls.xml
deleted file mode 100644
index ce12380..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-queue-acls.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- mapred-queue-acls.xml -->
-<configuration>
-
-
-<!-- queue default -->
-
-  <property>
-    <name>mapred.queue.default.acl-submit-job</name>
-    <value>*</value>
-  </property>
-
-  <property>
-    <name>mapred.queue.default.acl-administer-jobs</name>
-    <value>*</value>
-  </property>
-
-  <!-- END ACLs -->
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-site.xml
deleted file mode 100644
index 424d216..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/mapred-site.xml
+++ /dev/null
@@ -1,381 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-<!-- i/o properties -->
-
-  <property>
-    <name>mapreduce.task.io.sort.mb</name>
-    <value>200</value>
-    <description>
-      The total amount of buffer memory to use while sorting files, in megabytes.
-      By default, gives each merge stream 1MB, which should minimize seeks.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.sort.spill.percent</name>
-    <value>0.7</value>
-    <description>
-      The soft limit in the serialization buffer. Once reached, a thread will
-      begin to spill the contents to disk in the background. Note that
-      collection will not block if this threshold is exceeded while a spill
-      is already in progress, so spills may be larger than this threshold when
-      it is set to less than .5
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.task.io.sort.factor</name>
-    <value>100</value>
-    <description>
-      The number of streams to merge at once while sorting files.
-      This determines the number of open file handles.
-    </description>
-  </property>
-
-<!-- map/reduce properties -->
-  <property>
-    <name>mapreduce.cluster.administrators</name>
-    <value> hadoop</value>
-    <description>
-      Administrators for MapReduce applications.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.parallelcopies</name>
-    <value>30</value>
-    <description>
-      The default number of parallel transfers run by reduce during
-      the copy(shuffle) phase.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some map tasks
-      may be executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.speculative</name>
-    <value>false</value>
-    <description>
-      If true, then multiple instances of some reduce tasks may be
-      executed in parallel.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
-    <value>0.05</value>
-    <description>
-      Fraction of the number of maps in the job which should be complete before
-      reduces are scheduled for the job.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.merge.percent</name>
-    <value>0.66</value>
-    <description>
-      The usage threshold at which an in-memory merge will be
-      initiated, expressed as a percentage of the total memory allocated to
-      storing in-memory map outputs, as defined by
-      mapreduce.reduce.shuffle.input.buffer.percent.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
-    <value>0.7</value>
-    <description>
-      The percentage of memory to be allocated from the maximum heap
-      size to storing map outputs during the shuffle.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.output.compress.codec</name>
-    <value></value>
-    <description>If the map outputs are compressed, how should they be
-      compressed
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress.type</name>
-    <value>BLOCK</value>
-    <description>
-      If the job outputs are to be compressed as SequenceFiles, how should
-      they be compressed? Should be one of NONE, RECORD or BLOCK.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.input.buffer.percent</name>
-    <value>0.0</value>
-    <description>
-      The percentage of memory, relative to the maximum heap size, to
-      retain map outputs during the reduce. When the shuffle is concluded, any
-      remaining map outputs in memory must consume less than this threshold before
-      the reduce can begin.
-    </description>
-  </property>
-
-  <!-- copied from kryptonite configuration -->
-  <property>
-    <name>mapreduce.map.output.compress</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>mapreduce.task.timeout</name>
-    <value>300000</value>
-    <description>
-      The number of milliseconds before a task will be
-      terminated if it neither reads an input, writes an output, nor
-      updates its status string.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Map task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.memory.mb</name>
-    <value>1024</value>
-    <description>Virtual memory for single Reduce task</description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.keytab.file</name>
-    <!-- cluster variant -->
-    <value></value>
-    <description>The keytab for the job history server principal.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.shuffle.port</name>
-    <value>13562</value>
-    <description>
-      Default port that the ShuffleHandler will run on.
-      ShuffleHandler is a service run at the NodeManager to facilitate
-      transfers of intermediate Map outputs to requesting Reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.intermediate-done-dir</name>
-    <value>/mr-history/tmp</value>
-    <description>
-      Directory where history files are written by MapReduce jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.jobhistory.done-dir</name>
-    <value>/mr-history/done</value>
-    <description>
-      Directory where history files are managed by the MR JobHistory Server.
-    </description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.address</name>
-    <value>localhost:10020</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>       
-    <name>mapreduce.jobhistory.webapp.address</name>
-    <value>localhost:19888</value>
-    <description>Enter your JobHistoryServer hostname.</description>
-  </property>
-
-  <property>
-    <name>mapreduce.framework.name</name>
-    <value>yarn</value>
-    <description>
-      The runtime framework for executing MapReduce jobs. Can be one of local,
-      classic or yarn.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.staging-dir</name>
-    <value>/user</value>
-    <description>
-      The staging dir used while submitting jobs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.resource.mb</name>
-    <value>512</value>
-    <description>The amount of memory the MR AppMaster needs.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.command-opts</name>
-    <value>-Xmx312m</value>
-    <description>
-      Java opts for the MR App Master processes.
-      The following symbol, if present, will be interpolated: @taskid@ is replaced
-      by current TaskID. Any other occurrences of '@' will go unchanged.
-      For example, to enable verbose gc logging to a file named for the taskid in
-      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
-      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.admin-command-opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-    <description>
-      Java opts for the MR App Master processes for admin purposes.
-      It appears before the opts set by yarn.app.mapreduce.am.command-opts and
-      thus its options can be overridden by the user.
-
-      Usage of -Djava.library.path can cause programs to no longer function if
-      hadoop native libraries are used. These values should instead be set as part
-      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
-      mapreduce.reduce.env config settings.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.log.level</name>
-    <value>INFO</value>
-    <description>MR App Master process log level.</description>
-  </property>
-
-  <property>
-    <name>yarn.app.mapreduce.am.env</name>
-    <value></value>
-    <description>
-      User added environment variables for the MR App Master
-      processes. Example :
-      1) A=foo  This will set the env variable A to foo
-      2) B=$B:c This inherits the tasktracker's B env variable.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.map.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.reduce.child.java.opts</name>
-    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
-  </property>
-
-  <property>
-    <name>mapreduce.application.classpath</name>
-    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
-    <description>
-      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
-      entries.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It is an
-      application-specific setting. It should not be larger than the global number
-      set by the resourcemanager; otherwise, it will be overridden. The default is
-      set to 2, to allow at least one retry for the AM.
-    </description>
-  </property>
-
-
-
-  <property>
-    <name>mapreduce.map.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of maps.
-    </description>
-  </property>
-
-
-  <property>
-    <name>mapreduce.reduce.java.opts</name>
-    <value>-Xmx756m</value>
-    <description>
-      Larger heap-size for child jvms of reduces.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.map.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the map task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.reduce.log.level</name>
-    <value>INFO</value>
-    <description>
-      The logging level for the reduce task. The allowed levels are:
-      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.admin.user.env</name>
-    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
-    <description>
-      Additional execution environment entries for map and reduce task processes.
-      This is not an additive property. You must preserve the original value if
-      you want your map and reduce tasks to have access to native libraries (compression, etc)
-    </description>
-  </property>
-
-  <property>
-    <name>mapreduce.output.fileoutputformat.compress</name>
-    <value>false</value>
-    <description>
-      Should the job outputs be compressed?
-    </description>
-  </property>
-
-</configuration>
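
Note how the container sizes and child JVM options in this file relate:
mapreduce.map.memory.mb / mapreduce.reduce.memory.mb (1024 MB) bound the whole
container, while mapreduce.map.java.opts / mapreduce.reduce.java.opts (-Xmx756m)
must leave headroom inside it. A rough sketch of that sizing rule, assuming the
commonly quoted ~0.75 heap-to-container ratio (an assumption, not a value taken
from this file):

    def child_java_opts(container_mb, heap_fraction=0.75):
        # heap_fraction is an assumed rule of thumb, not an Ambari constant.
        heap_mb = int(container_mb * heap_fraction)
        return "-Xmx%dm" % heap_mb

    # With the 1024 MB defaults above this yields -Xmx768m, close to the
    # -Xmx756m shipped in this mapred-site.xml.
    print child_java_opts(1024)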

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/yarn-site.xml
deleted file mode 100644
index 7d4d4fb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/configuration/yarn-site.xml
+++ /dev/null
@@ -1,337 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
-
-  <!-- ResourceManager -->
-
-  <property>
-    <name>yarn.resourcemanager.hostname</name>
-    <value>localhost</value>
-    <description>The hostname of the RM.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.resource-tracker.address</name>
-    <value>localhost:8025</value>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.address</name>
-    <value>localhost:8030</value>
-    <description>The address of the scheduler interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.address</name>
-    <value>localhost:8050</value>
-    <description>
-      The address of the applications manager interface in the
-      RM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.admin.address</name>
-    <value>localhost:8141</value>
-    <description>The address of the RM admin interface.</description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.scheduler.class</name>
-    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
-    <description>The class to use as the resource scheduler.</description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.minimum-allocation-mb</name>
-    <value>512</value>
-    <description>
-      The minimum allocation for every container request at the RM,
-      in MBs. Memory requests lower than this won't take effect,
-      and the specified value will get allocated at minimum.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.scheduler.maximum-allocation-mb</name>
-    <value>2048</value>
-    <description>
-      The maximum allocation for every container request at the RM,
-      in MBs. Memory requests higher than this won't take effect,
-      and will get capped to this value.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.acl.enable</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>yarn.admin.acl</name>
-    <value>*</value>
-  </property>
-
-  <!-- NodeManager -->
-
-  <property>
-    <name>yarn.nodemanager.address</name>
-    <value>0.0.0.0:45454</value>
-    <description>The address of the container manager in the NM.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.resource.memory-mb</name>
-    <value>5120</value>
-    <description>Amount of physical memory, in MB, that can be allocated
-      for containers.</description>
-  </property>
-
-  <property>
-    <name>yarn.application.classpath</name>
-    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
-    <description>Classpath for typical applications.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-pmem-ratio</name>
-    <value>2.1</value>
-    <description>Ratio between virtual memory to physical memory when
-      setting memory limits for containers. Container allocations are
-      expressed in terms of physical memory, and virtual memory usage
-      is allowed to exceed this allocation by this ratio.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-executor.class</name>
-    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
-    <description>ContainerExecutor for launching containers</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.linux-container-executor.group</name>
-    <value>hadoop</value>
-    <description>Unix group of the NodeManager</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services</name>
-    <value>mapreduce_shuffle</value>
-    <description>Auxiliary services of the NodeManager. A valid service name should only contain a-zA-Z0-9_ and
-      cannot start with a number</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
-    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-dirs</name>
-    <value>/hadoop/yarn/log</value>
-    <description>
-      Where to store container logs. An application's localized log directory
-      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
-      Individual containers' log directories will be below this, in directories
-      named container_${contid}. Each container directory will contain the files
-      stderr, stdin, and syslog generated by that container.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.local-dirs</name>
-    <value>/hadoop/yarn/local</value>
-    <description>
-      List of directories to store localized files in. An
-      application's localized file directory will be found in:
-      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
-      Individual containers' work directories, called container_${contid}, will
-      be subdirectories of this.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.container-monitor.interval-ms</name>
-    <value>3000</value>
-    <description>
-      The interval, in milliseconds, for which the node manager
-      waits between two cycles of monitoring its containers' memory usage.
-    </description>
-  </property>
-
-  <!--
-  <property>
-    <name>yarn.nodemanager.health-checker.script.path</name>
-    <value>/etc/hadoop/conf/health_check_nodemanager</value>
-    <description>The health check script to run.</description>
-  </property>
-   -->
-
-  <property>
-    <name>yarn.nodemanager.health-checker.interval-ms</name>
-    <value>135000</value>
-    <description>Frequency of running node health script.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
-    <value>60000</value>
-    <description>Script time out period.</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log.retain-second</name>
-    <value>604800</value>
-    <description>
-      Time in seconds to retain user logs. Only applicable if
-      log aggregation is disabled.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation-enable</name>
-    <value>true</value>
-    <description>Whether to enable log aggregation</description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir</name>
-    <value>/app-logs</value>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
-    <value>logs</value>
-    <description>
-      The remote log dir will be created at
-      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.log-aggregation.compression-type</name>
-    <value>gz</value>
-    <description>
-      T-file compression types used to compress aggregated logs.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.delete.debug-delay-sec</name>
-    <value>0</value>
-    <description>
-      Number of seconds after an application finishes before the nodemanager's
-      DeletionService will delete the application's localized file directory
-      and log directory.
-
-      To diagnose Yarn application problems, set this property's value large
-      enough (for example, to 600 = 10 minutes) to permit examination of these
-      directories. After changing the property's value, you must restart the
-      nodemanager in order for it to have an effect.
-
-      The roots of Yarn applications' work directories are configurable with
-      the yarn.nodemanager.local-dirs property (see below), and the roots
-      of the Yarn applications' log directories are configurable with the
-      yarn.nodemanager.log-dirs property (see also below).
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log-aggregation.retain-seconds</name>
-    <value>2592000</value>
-    <description>
-      How long to keep aggregated logs before deleting them. -1 disables deletion.
-      Be careful: if you set this too small, you will spam the name node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.admin-env</name>
-    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
-    <description>
-      Environment variables that should be forwarded from the NodeManager's
-      environment to the container's.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
-    <value>0.25</value>
-    <description>
-      The minimum fraction of disks that must be healthy for the nodemanager
-      to launch new containers. This corresponds to both
-      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs; i.e.,
-      if fewer healthy local-dirs (or log-dirs) are available,
-      then new containers will not be launched on this node.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.am.max-attempts</name>
-    <value>2</value>
-    <description>
-      The maximum number of application attempts. It's a global
-      setting for all application masters. Each application master can specify
-      its individual maximum number of application attempts via the API, but the
-      individual number cannot be more than the global upper bound. If it is,
-      the resourcemanager will override it. The default number is set to 2, to
-      allow at least one retry for AM.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.webapp.address</name>
-    <value>localhost:8088</value>
-    <description>
-      The address of the RM web application.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.nodemanager.vmem-check-enabled</name>
-    <value>false</value>
-    <description>
-      Whether virtual memory limits will be enforced for containers.
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.log.server.url</name>
-    <value>http://localhost:19888/jobhistory/logs</value>
-    <description>
-      URI for the HistoryServer's log resource
-    </description>
-  </property>
-
-  <property>
-    <name>yarn.resourcemanager.nodes.exclude-path</name>
-    <value>/etc/hadoop/conf/yarn.exclude</value>
-    <description>
-      Names a file that contains a list of hosts that are
-      not permitted to connect to the resource manager.  The full pathname of the
-      file must be specified.  If the value is empty, no hosts are
-      excluded.
-    </description>
-  </property>
-
-</configuration>
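
The scheduler settings above interact: container requests are rounded up to
yarn.scheduler.minimum-allocation-mb (512), capped at
yarn.scheduler.maximum-allocation-mb (2048), and one NodeManager can hand out at
most yarn.nodemanager.resource.memory-mb (5120) in total. A small illustrative
sketch of that arithmetic (an approximation of the scheduler's normalization,
not its actual code):

    import math

    def normalize_request(request_mb, min_alloc=512, max_alloc=2048):
        # Round up to a multiple of the minimum allocation, then cap at the maximum.
        rounded = int(math.ceil(request_mb / float(min_alloc)) * min_alloc)
        return min(max(rounded, min_alloc), max_alloc)

    def containers_per_node(request_mb, node_mb=5120):
        # How many such containers fit on one NodeManager with the defaults above.
        return node_mb // normalize_request(request_mb)

    print normalize_request(700)     # 1024
    print containers_per_node(700)   # 5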

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
deleted file mode 100644
index f25d80b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metainfo.xml
+++ /dev/null
@@ -1,172 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>YARN</name>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0.6.0</version>
-      <components>
-
-        <component>
-          <name>RESOURCEMANAGER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/resourcemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/resourcemanager.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>NODEMANAGER</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/nodemanager.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>YARN_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/yarn_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn-nodemanager</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-mapreduce</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn-proxyserver</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-yarn-resourcemanager</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>capacity-scheduler</config-type>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-queue-acls</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>MAPREDUCE2</name>
-      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
-      <version>2.1.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HISTORYSERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/historyserver.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MAPREDUCE2_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/mapreduce2_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-mapreduce</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hadoop-mapreduce-historyserver</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/mapred_service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>yarn-site</config-type>
-        <config-type>capacity-scheduler</config-type>
-        <config-type>core-site</config-type>
-        <config-type>global</config-type>
-        <config-type>mapred-site</config-type>
-        <config-type>mapred-queue-acls</config-type>
-      </configuration-dependencies>
-    </service>
-
-  </services>
-</metainfo>
\ No newline at end of file
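
The metainfo.xml removed above follows the schemaVersion 2.0 layout: each <service>
lists its <components>, each with a category (MASTER/SLAVE/CLIENT) and a Python
command script. A short sketch of walking such a file with the standard library
(the file path is a placeholder):

    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")  # point at any schemaVersion 2.0 metainfo.xml
    for service in tree.getroot().findall("./services/service"):
        print service.findtext("name"), service.findtext("version")
        for comp in service.findall("./components/component"):
            print "  %-20s %-7s %s" % (comp.findtext("name"),
                                       comp.findtext("category"),
                                       comp.findtext("./commandScript/script"))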


[21/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_webui.sh
deleted file mode 100644
index b23045e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_webui.sh
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-checkurl () {
-  url=$1
-  curl $url -o /dev/null
-  echo $?
-}
-
-service=$1
-host=$2
-port=$3
-
-if [[ -z "$service" || -z "$host" || -z "$port" ]]; then
-  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name port";
-  exit 3;
-fi
-
-case "$service" in
-
-jobtracker) 
-    jtweburl="http://$host:$port"
-    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
-      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
-      exit 1;
-    fi
-    ;;
-namenode)
-    nnweburl="http://$host:$port"
-    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
-      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
-      exit 1;
-    fi
-    ;;
-jobhistory)
-    jhweburl="http://$host:$port/jobhistoryhome.jsp"
-    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
-      exit 1;
-    fi
-    ;;
-hbase)
-    hbaseweburl="http://$host:$port/master-status"
-    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
-      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
-      exit 1;
-    fi
-    ;;
-resourcemanager)
-    rmweburl="http://$host:$port/cluster"
-    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
-      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
-      exit 1;
-    fi
-    ;;
-historyserver2)
-    hsweburl="http://$host:$port/jobhistory"
-    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
-      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
-      exit 1;
-    fi
-    ;;
-*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
-   exit 3
-   ;;
-esac
-
-echo "OK: Successfully accessed $service Web UI"
-exit 0;
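
check_webui.sh above treats any successful curl fetch of the service's web UI as
healthy and uses the Nagios exit-code convention: 0 OK, 1 WARNING, 3 UNKNOWN. A
rough Python 2 equivalent of the probe, standard library only and not part of the
stack scripts (the /cluster path matches the resourcemanager case above):

    import sys
    import urllib2

    def check_url(url, timeout=10):
        # Mirrors the curl test: reachable at all means healthy.
        try:
            urllib2.urlopen(url, timeout=timeout)
            return True
        except Exception:
            return False

    host, port = sys.argv[1], sys.argv[2]
    url = "http://%s:%s/cluster" % (host, port)
    if check_url(url):
        print "OK: Successfully accessed Web UI : %s" % url
        sys.exit(0)
    print "WARNING: Web UI not accessible : %s" % url
    sys.exit(1)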

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/hdp_nagios_init.php
deleted file mode 100644
index 487eb43..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/hdp_nagios_init.php
+++ /dev/null
@@ -1,81 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* Common functions called from other alerts
- *
- */
- 
- /*
- * Function for kinit. If security is enabled and klist shows no ticket for this principal,
- * makes a kinit call.
- */
-  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
-    if($security_enabled === 'true') {
-    
-      $is_logined = is_logined($principal_name);
-      
-      if (!$is_logined)
-        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
-      else
-        $status = array(0, '');
-    } else {
-      $status = array(0, '');
-    }
-  
-    return $status;
-  }
-  
-  
-  /*
-  * Checks if the user is logged in to Kerberos.
-  */
-  function is_logined($principal_name) {
-    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
-    $check_output =  shell_exec($check_cmd);
-    
-    if ($check_output)
-      return false;
-    else
-      return true;
-  }
-
-  /*
-  * Runs kinit command.
-  */
-  function kinit($kinit_path_local, $keytab_path, $principal_name) {
-    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
-    $kinit_output = shell_exec($init_cmd);
-    if ($kinit_output) 
-      $status = array(1, $kinit_output);
-    else
-      $status = array(0, '');
-      
-    return $status;
-  }
-
-  function logout() {
-    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
-      $status = true;
-    else
-      $status = false;
-      
-    return $status;
-  }
- 
- ?>
\ No newline at end of file
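
hdp_nagios_init.php implements "kinit only if the ticket cache has no ticket for
this principal". The same guard sketched in Python 2 with subprocess, for
illustration only (the paths and principal below are placeholders, not values from
the Nagios package):

    import subprocess

    def has_ticket(principal):
        # True if klist already lists a ticket for the principal (mirrors is_logined).
        out = subprocess.Popen(["klist"], stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE).communicate()[0]
        return principal in out

    def kinit_if_needed(security_enabled, kinit_path, keytab, principal):
        # Run kinit only when security is on and no valid ticket is cached.
        if not security_enabled or has_ticket(principal):
            return 0
        return subprocess.call([kinit_path, "-kt", keytab, principal])

    kinit_if_needed(True, "/usr/bin/kinit",
                    "/etc/security/keytabs/nagios.service.keytab", "nagios")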

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/functions.py
deleted file mode 100644
index 964225e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/functions.py
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-from resource_management.libraries.script.config_dictionary import UnknownConfiguration
-
-def get_port_from_url(address):
-  if not is_empty(address):
-    return address.split(':')[-1]
-  else:
-    return address
-  
-def is_empty(var):
-  return isinstance(var, UnknownConfiguration)
\ No newline at end of file
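
get_port_from_url() above simply returns everything after the last ':' of an
address and passes UnknownConfiguration values through untouched so callers can
detect missing config. Example behaviour with plain strings (a standalone copy,
so it runs without the Ambari libraries):

    def get_port_from_url(address):
        # Standalone illustration; the real function also passes
        # UnknownConfiguration values through unchanged.
        return address.split(':')[-1]

    print get_port_from_url("c6401.ambari.apache.org:8088")   # 8088
    print get_port_from_url("localhost:19888")                 # 19888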

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios.py
deleted file mode 100644
index af09e87..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios.py
+++ /dev/null
@@ -1,97 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from nagios_server_config import nagios_server_config
-
-def nagios():
-  import params
-
-  File( params.nagios_httpd_config_file,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    content = Template("nagios.conf.j2"),
-    mode   = 0644
-  )
-
-  # enable snmpd
-  Execute( "service snmpd start; chkconfig snmpd on",
-    path = "/usr/local/bin/:/bin/:/sbin/"
-  )
-  
-  Directory( params.conf_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-
-  Directory( [params.plugins_dir, params.nagios_obj_dir])
-
-  Directory( params.nagios_pid_dir,
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755,
-    recursive = True
-  )
-
-  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    recursive = True
-  )
-  
-  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode = 0755
-  )
-
-  nagios_server_config()
-
-  set_web_permisssions()
-
-  File( format("{conf_dir}/command.cfg"),
-    owner = params.nagios_user,
-    group = params.nagios_group
-  )
-  
-  
-def set_web_permisssions():
-  import params
-
-  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
-  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
-  Execute( cmd,
-    not_if = test
-  )
-
-  File( "/etc/nagios/htpasswd.users",
-    owner = params.nagios_user,
-    group = params.nagios_group,
-    mode  = 0640
-  )
-
-  if System.get_instance().platform == "suse":
-    command = format("usermod -G {nagios_group} wwwrun")
-  else:
-    command = format("usermod -a -G {nagios_group} apache")
-  
-  Execute( command)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server.py
deleted file mode 100644
index 02685c7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server.py
+++ /dev/null
@@ -1,87 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from nagios import nagios
-from nagios_service import nagios_service
-
-         
-class NagiosServer(Script):
-  def install(self, env):
-    remove_conflicting_packages()
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    nagios()
-
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    self.configure(env) # done for updating configs after Security enabled
-    nagios_service(action='start')
-
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    
-    nagios_service(action='stop')
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nagios_pid_file)
-    
-def remove_conflicting_packages():  
-  Package( 'hdp_mon_nagios_addons',
-    action = "remove"
-  )
-
-  Package( 'nagios-plugins',
-    action = "remove"
-  )
-
-  Execute( "rpm -e --allmatches --nopostun nagios",
-    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
-    ignore_failures = True 
-  )
-
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
-  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
-  stroutfile = '/1.txt'
-  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
-  
-  NagiosServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  NagiosServer().execute()
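
The commented-out main() above shows how these Script subclasses are driven:
execute() dispatches on a command name (install/configure/start/stop/status), and
the agent supplies a command JSON, the package directory and an output file via
sys.argv. A minimal stand-in for that lifecycle, for illustration only (the real
resource_management base class does much more):

    class ScriptSketch(object):
        def execute(self, command):
            getattr(self, command.lower())(env={})

    class NagiosServerSketch(ScriptSketch):
        def install(self, env):
            print "install: remove conflicting packages, install, configure"
        def configure(self, env):
            print "configure: render nagios configs and plugin checks"
        def start(self, env):
            self.configure(env)  # re-render configs, e.g. after enabling security
            print "start: service nagios start"
        def stop(self, env):
            print "stop: service nagios stop"
        def status(self, env):
            print "status: check the nagios pid file"

    NagiosServerSketch().execute("START")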

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server_config.py
deleted file mode 100644
index b3e639c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_server_config.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_server_config():
-  import params
-  
-  nagios_server_configfile( 'nagios.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'resource.cfg', 
-                            config_dir = params.conf_dir, 
-                            group = params.nagios_group
-  )
-  nagios_server_configfile( 'hadoop-hosts.cfg')
-  nagios_server_configfile( 'hadoop-hostgroups.cfg')
-  nagios_server_configfile( 'hadoop-servicegroups.cfg')
-  nagios_server_configfile( 'hadoop-services.cfg')
-  nagios_server_configfile( 'hadoop-commands.cfg')
-  nagios_server_configfile( 'contacts.cfg')
-  
-  if System.get_instance().platform != "suse":
-    nagios_server_configfile( 'nagios',
-                              config_dir = '/etc/init.d/', 
-                              mode = 0755, 
-                              owner = 'root', 
-                              group = 'root'
-    )
-
-  nagios_server_check( 'check_cpu.pl')
-  nagios_server_check( 'check_datanode_storage.php')
-  nagios_server_check( 'check_aggregate.php')
-  nagios_server_check( 'check_hdfs_blocks.php')
-  nagios_server_check( 'check_hdfs_capacity.php')
-  nagios_server_check( 'check_rpcq_latency.php')
-  nagios_server_check( 'check_webui.sh')
-  nagios_server_check( 'check_name_dir_status.php')
-  nagios_server_check( 'check_oozie_status.sh')
-  nagios_server_check( 'check_templeton_status.sh')
-  nagios_server_check( 'check_hive_metastore_status.sh')
-  nagios_server_check( 'check_hue_status.sh')
-  nagios_server_check( 'check_mapred_local_dir_used.sh')
-  nagios_server_check( 'check_nodemanager_health.sh')
-  nagios_server_check( 'check_namenodes_ha.sh')
-  nagios_server_check( 'hdp_nagios_init.php')
-
-
-def nagios_server_configfile(
-  name,
-  owner = None,
-  group = None,
-  config_dir = None,
-  mode = None
-):
-  import params
-  owner = params.nagios_user if not owner else owner
-  group = params.user_group if not group else group
-  config_dir = params.nagios_obj_dir if not config_dir else config_dir
-  
-  TemplateConfig( format("{config_dir}/{name}"),
-    owner          = owner,
-    group          = group,
-    mode           = mode
-  )
-
-def nagios_server_check(name):
-  File( format("{plugins_dir}/{name}"),
-    content = StaticFile(name), 
-    mode = 0755
-  )
\ No newline at end of file
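
nagios_server_configfile() above lets callers omit owner, group and config_dir and
falls back to the params module. The same fallback pattern in plain Python, shown
only to make it explicit (the names and defaults below are illustrative):

    DEFAULT_OWNER, DEFAULT_GROUP, DEFAULT_DIR = "nagios", "hadoop", "/etc/nagios/objects"

    def configfile(name, owner=None, group=None, config_dir=None, mode=None):
        # Fall back to module-level defaults when the caller passes None,
        # mirroring how nagios_server_configfile() defers to params.*.
        owner = owner or DEFAULT_OWNER
        group = group or DEFAULT_GROUP
        config_dir = config_dir or DEFAULT_DIR
        return ("%s/%s" % (config_dir, name), owner, group, mode)

    print configfile("hadoop-services.cfg")
    print configfile("nagios", config_dir="/etc/init.d", mode=0755,
                     owner="root", group="root")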

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_service.py
deleted file mode 100644
index cc411b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/nagios_service.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def nagios_service(action='start'): # start or stop
-  import params
-
-  if action == 'start':
-   command = "service nagios start"
-  elif action == 'stop':
-   command = format("service nagios stop && rm -f {nagios_pid_file}")
-
-  Execute( command,
-     path    = "/usr/local/bin/:/bin/:/sbin/"      
-  )
-  MonitorWebserver("restart")
\ No newline at end of file
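
nagios_service() above builds a shell command per action; note that any action
other than 'start' or 'stop' would leave command undefined before Execute runs.
A tiny sketch of the same dispatch with an explicit guard (placeholder pid file,
no resource_management DSL):

    def nagios_service_command(action, pid_file="/var/run/nagios/nagios.pid"):
        if action == "start":
            return "service nagios start"
        if action == "stop":
            return "service nagios stop && rm -f %s" % pid_file
        raise ValueError("unsupported action: %s" % action)

    print nagios_service_command("start")
    print nagios_service_command("stop")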

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/params.py
deleted file mode 100644
index bd7135e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/params.py
+++ /dev/null
@@ -1,162 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-from functions import get_port_from_url
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-conf_dir = "/etc/nagios"
-nagios_var_dir = "/var/nagios"
-nagios_rw_dir = "/var/nagios/rw"
-plugins_dir = "/usr/lib64/nagios/plugins"
-nagios_obj_dir = "/etc/nagios/objects"
-check_result_path = "/var/nagios/spool/checkresults"
-nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
-nagios_log_dir = "/var/log/nagios"
-nagios_log_archives_dir = format("{nagios_log_dir}/archives")
-nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
-nagios_lookup_daemon_str = "/usr/sbin/nagios"
-nagios_pid_dir = status_params.nagios_pid_dir
-nagios_pid_file = status_params.nagios_pid_file
-nagios_resource_cfg = format("{conf_dir}/resource.cfg")
-nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
-nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
-nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
-nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
-eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
-nagios_principal_name = default("nagios_principal_name", "nagios")
-hadoop_ssl_enabled = False
-
-namenode_metadata_port = "8020"
-oozie_server_port = "11000"
-# different to HDP1    
-namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.namenode.http-address'])
-# different to HDP1  
-snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.namenode.secondary.http-address"])
-
-hbase_master_rpc_port = "60000"
-rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
-nm_port = "8042"
-hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address'])
-journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
-datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
-flume_port = "4159"
-hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
-templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
-hbase_rs_port = "60030"
-
-# this is different for HDP1
-nn_metrics_property = "FSNamesystem"
-clientPort = config['configurations']['global']['clientPort'] #ZK 
-
-
-java64_home = config['hostLevelParams']['java_home']
-security_enabled = config['configurations']['global']['security_enabled']
-
-nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-ganglia_port = "8651"
-ganglia_collector_slaves_port = "8660"
-ganglia_collector_namenode_port = "8661"
-ganglia_collector_jobtracker_port = "8662"
-ganglia_collector_hbase_port = "8663"
-ganglia_collector_rm_port = "8664"
-ganglia_collector_nm_port = "8660"
-ganglia_collector_hs_port = "8666"
-  
-all_ping_ports = config['clusterHostInfo']['all_ping_ports']
-
-if System.get_instance().platform == "suse":
-  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
-  htpasswd_cmd = "htpasswd2"
-else:
-  nagios_p1_pl = "/usr/bin/p1.pl"
-  htpasswd_cmd = "htpasswd"
-  
-nagios_user = config['configurations']['global']['nagios_user']
-nagios_group = config['configurations']['global']['nagios_group']
-nagios_web_login = config['configurations']['global']['nagios_web_login']
-nagios_web_password = config['configurations']['global']['nagios_web_password']
-user_group = config['configurations']['global']['user_group']
-nagios_contact = config['configurations']['global']['nagios_contact']
-
-namenode_host = default("/clusterHostInfo/namenode_host", None)
-_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
-_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
-_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
-_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
-_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
-_rm_host = default("/clusterHostInfo/rm_host", None)
-_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
-_hs_host = default("/clusterHostInfo/hs_host", None)
-_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
-_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
-_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
-_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
-
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
-_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
-_oozie_server = default("/clusterHostInfo/oozie_server",None)
-_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
-# can differ on HDP1
-#_mapred_tt_hosts = _slave_hosts
-#if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
-_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
-_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
-all_hosts = config['clusterHostInfo']['all_hosts']
-
-
-hostgroup_defs = {
-    'namenode' : namenode_host,
-    'snamenode' : _snamenode_host,
-    'slaves' : _slave_hosts,
-    # HDP1
-    #'tasktracker-servers' : _mapred_tt_hosts,
-    'agent-servers' : all_hosts,
-    'nagios-server' : _nagios_server_host,
-    'jobtracker' : _jtnode_host,
-    'ganglia-server' : _ganglia_server_host,
-    'flume-servers' : _flume_hosts,
-    'zookeeper-servers' : _zookeeper_hosts,
-    'hbasemasters' : hbase_master_hosts,
-    'hiveserver' : _hive_server_host,
-    'region-servers' : _hbase_rs_hosts,
-    'oozie-server' : _oozie_server,
-    'webhcat-server' : _webhcat_server_host,
-    'hue-server' : _hue_server_host,
-    'resourcemanager' : _rm_host,
-    'nodemanagers' : _nm_hosts,
-    'historyserver2' : _hs_host,
-    'journalnodes' : _journalnode_hosts
-}
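
Editor's note: the deleted params above lean on get_port_from_url() (a helper defined elsewhere in these stack scripts, not shown in this hunk) to pull the port out of address-style properties such as dfs.namenode.http-address. A minimal, hypothetical sketch of what such a helper does, assuming the usual "host:port" form of those properties (this is not the actual Ambari implementation):

    # Hypothetical sketch only: accept "host:port" or a bare port value.
    def get_port_from_url(address):
        address = str(address)
        if ':' in address:
            return address.split(':')[-1]
        return address

    # The NameNode HTTP address typically looks like "namenode.example.com:50070"
    print(get_port_from_url("namenode.example.com:50070"))  # prints "50070"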

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/status_params.py
deleted file mode 100644
index 33b35fe..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-nagios_pid_dir = "/var/run/nagios"
-nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")
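
Editor's note: status_params.py only defines where the Nagios pid file lives; a status check consumes it. A hand-rolled illustration of that idea (Ambari's real status handler uses its resource_management helpers rather than this sketch; the /proc lookup assumes Linux):

    import os

    # Illustrative only: read the pid file and verify the process still exists.
    nagios_pid_file = "/var/run/nagios/nagios.pid"

    def nagios_is_running(pid_file=nagios_pid_file):
        try:
            with open(pid_file) as f:
                pid = int(f.read().strip())
        except (IOError, ValueError):
            return False
        return os.path.exists("/proc/%d" % pid)

    print(nagios_is_running())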

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/contacts.cfg.j2
deleted file mode 100644
index 9dada51..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/contacts.cfg.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-###############################################################################
-# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
-#
-# Last Modified: 05-31-2007
-#
-# NOTES: This config file provides you with some example contact and contact
-#        group definitions that you can reference in host and service
-#        definitions.
-#       
-#        You don't need to keep these definitions in a separate file from your
-#        other object definitions.  This has been done just to make things
-#        easier to understand.
-#
-###############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-
-###############################################################################
-###############################################################################
-#
-# CONTACTS
-#
-###############################################################################
-###############################################################################
-
-# Just one contact defined by default - the Nagios admin (that's you)
-# This contact definition inherits a lot of default values from the 'generic-contact' 
-# template which is defined elsewhere.
-
-define contact{
-        contact_name    {{nagios_web_login}}                                        ; Short name of user
-        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
-        alias           Nagios Admin                                                ; Full name of user
-
-        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
-        }
-
-# Contact which writes all Nagios alerts to the system logger.
-define contact{
-        contact_name                    sys_logger         ; Short name of user
-        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
-        alias                           System Logger      ; Full name of user
-        host_notifications_enabled      1
-        service_notifications_enabled   1
-        service_notification_period     24x7
-        host_notification_period        24x7
-        service_notification_options    w,u,c,r,s
-        host_notification_options       d,u,r,s
-        can_submit_commands             1
-        retain_status_information       1
-        service_notification_commands   service_sys_logger
-        host_notification_commands      host_sys_logger
-        }
-
-###############################################################################
-###############################################################################
-#
-# CONTACT GROUPS
-#
-###############################################################################
-###############################################################################
-
-# We only have one contact in this simple configuration file, so there is
-# no need to create more than one contact group.
-
-define contactgroup {
-        contactgroup_name       admins
-        alias                   Nagios Administrators
-        members                 {{nagios_web_login}},sys_logger
-}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
deleted file mode 100644
index e47a09e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
+++ /dev/null
@@ -1,114 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-{% if env.system.platform != "suse" %}
-# 'check_cpu' check remote cpu load
-define command {
-        command_name    check_cpu
-        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
-       }
-{% endif %}
-
-# Check data node storage full 
-define command {
-        command_name    check_datanode_storage
-        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
-       }
-
-define command{
-        command_name    check_hdfs_blocks
-        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
-       }
-
-define command{
-        command_name    check_hdfs_capacity
-        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_aggregate
-        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
-       }
-
-define command{
-        command_name    check_rpcq_latency
-        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
-       }
-
-define command{
-        command_name    check_nagios
-        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
-       }
-
-define command{
-        command_name    check_webui
-        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
-       }
-
-define command{
-        command_name    check_name_dir_status
-        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
-       }
-
-define command{
-        command_name    check_oozie_status
-        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_templeton_status
-        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-
-define command{
-        command_name    check_hive_metastore_status
-        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
-       }
-define command{
-        command_name    check_hue_status
-        command_line    $USER1$/check_hue_status.sh
-       }
-
-define command{
-       command_name    check_mapred_local_dir_used_space
-       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
-       }
-
-define command{
-       command_name    check_namenodes_ha
-       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
-       }
-
-define command{
-        command_name    check_nodemanager_health
-        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
-       }
-
-define command{
-        command_name    host_sys_logger
-        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
-       }
-
-define command{
-        command_name    service_sys_logger
-        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
-       }
\ No newline at end of file
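
Editor's note: the service definitions that reference these commands pass arguments as a single '!'-separated string, which Nagios substitutes into the $ARG1$, $ARG2$, ... macros of the matching command_line. A small Python model of that expansion (illustrative only; the check_tcp command_line below is a hypothetical stand-in, not taken from this patch):

    # Model how a check_command reference maps onto $ARGn$ macros.
    def expand_check_command(check_command, command_line):
        parts = check_command.split('!')
        name, args = parts[0], parts[1:]
        for i, arg in enumerate(args, start=1):
            command_line = command_line.replace('$ARG%d$' % i, arg)
        return name, command_line

    name, line = expand_check_command(
        'check_tcp!8670!-w 1 -c 1',
        '$USER1$/check_tcp -H $HOSTADDRESS$ -p $ARG1$ $ARG2$')
    print(name)   # check_tcp
    print(line)   # $USER1$/check_tcp -H $HOSTADDRESS$ -p 8670 -w 1 -c 1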

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
deleted file mode 100644
index 2bcbf7c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-{% for name, hosts in hostgroup_defs.iteritems() %}
-{% if hosts %}
-define hostgroup {
-        hostgroup_name  {{name}}
-        alias           {{name}}
-        members         {{','.join(hosts)}}
-}
-{% endif %}
-{% endfor %}
-
-define hostgroup {
-        hostgroup_name  all-servers
-        alias           All Servers
-        members         {{','.join(all_hosts)}}
-}
\ No newline at end of file
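
Editor's note: because this hostgroup template is plain Jinja2 over the hostgroup_defs and all_hosts variables from params.py, it is easy to preview the generated Nagios config locally. A minimal sketch, assuming the jinja2 package is installed and swapping the template's Python 2 iteritems() for items() so it also runs standalone under Python 3; the host names are made up:

    from jinja2 import Template

    # Trimmed copy of the hostgroup block above, iteritems() -> items().
    template = Template("""
    {% for name, hosts in hostgroup_defs.items() %}
    {% if hosts %}
    define hostgroup {
            hostgroup_name  {{name}}
            alias           {{name}}
            members         {{','.join(hosts)}}
    }
    {% endif %}
    {% endfor %}
    """)

    print(template.render(
        hostgroup_defs={'namenode': ['nn.example.com'],
                        'slaves': ['dn1.example.com', 'dn2.example.com'],
                        'hue-server': None}))   # empty groups are skipped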

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
deleted file mode 100644
index 62555d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-{% for host in all_hosts %}
-define host {
-        alias        {{host}}
-        host_name    {{host}}
-        use          linux-server
-        address      {{host}}
-        check_interval         0.25
-        retry_interval         0.25
-        max_check_attempts     4
-        notifications_enabled     1
-        first_notification_delay  0     # Send notification soon after change in the hard state
-        notification_interval     0     # Send the notification once
-        notification_options      d,u,r
-}
-
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
deleted file mode 100644
index 0101ce6..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
+++ /dev/null
@@ -1,80 +0,0 @@
-{% if hostgroup_defs['namenode'] or
-  hostgroup_defs['snamenode']  or
-  hostgroup_defs['slaves'] %}
-define servicegroup {
-  servicegroup_name  HDFS
-  alias  HDFS Checks
-}
-{% endif %}
-{%if hostgroup_defs['jobtracker'] or
-  hostgroup_defs['historyserver2']-%}
-define servicegroup {
-  servicegroup_name  MAPREDUCE
-  alias  MAPREDUCE Checks
-}
-{% endif %}
-{%if hostgroup_defs['resourcemanager'] or
-  hostgroup_defs['nodemanagers'] %}
-define servicegroup {
-  servicegroup_name  YARN
-  alias  YARN Checks
-}
-{% endif %}
-{%if hostgroup_defs['flume-servers'] %}
-define servicegroup {
-  servicegroup_name  FLUME
-  alias  FLUME Checks
-}
-{% endif %}
-{%if hostgroup_defs['hbasemasters'] %}
-define servicegroup {
-  servicegroup_name  HBASE
-  alias  HBASE Checks
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-define servicegroup {
-  servicegroup_name  OOZIE
-  alias  OOZIE Checks
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-define servicegroup {
-  servicegroup_name  WEBHCAT
-  alias  WEBHCAT Checks
-}
-{% endif %}
-{% if hostgroup_defs['nagios-server'] %}
-define servicegroup {
-  servicegroup_name  NAGIOS
-  alias  NAGIOS Checks
-}
-{% endif %}
-{% if hostgroup_defs['ganglia-server'] %}
-define servicegroup {
-  servicegroup_name  GANGLIA
-  alias  GANGLIA Checks
-}
-{% endif %}
-{% if hostgroup_defs['hiveserver'] %}
-define servicegroup {
-  servicegroup_name  HIVE-METASTORE
-  alias  HIVE-METASTORE Checks
-}
-{% endif %}
-{% if hostgroup_defs['zookeeper-servers'] %}
-define servicegroup {
-  servicegroup_name  ZOOKEEPER
-  alias  ZOOKEEPER Checks
-}
-{% endif %}
-define servicegroup {
-  servicegroup_name  AMBARI
-  alias  AMBARI Checks
-}
-{% if hostgroup_defs['hue-server'] %}
-define servicegroup {
-  servicegroup_name  HUE
-  alias  HUE Checks
-}
-{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2
deleted file mode 100644
index 5941c15..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/hadoop-services.cfg.j2
+++ /dev/null
@@ -1,643 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-{# TODO: Look for { or } in created file #}
-# NAGIOS SERVER Check (status log update)
-{% if hostgroup_defs['nagios-server'] %}
-define service {
-        name                            hadoop-service
-        use                             generic-service
-        notification_options            w,u,c,r,f,s
-        first_notification_delay        0
-        notification_interval           0                 # Send the notification once
-        contact_groups                  admins
-        notifications_enabled           1
-        event_handler_enabled           1
-        register                        0
-}
-
-define service {        
-        hostgroup_name          nagios-server        
-        use                     hadoop-service
-        service_description     NAGIOS::Nagios status log freshness
-        servicegroups           NAGIOS
-        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
-        normal_check_interval   5
-        retry_check_interval    0.5
-        max_check_attempts      2
-}
-
-# NAGIOS SERVER HDFS Checks
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes with space available
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent DataNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{# used only for HDP2 #}
-{% if hostgroup_defs['namenode'] and dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::NameNode HA Healthy
-        servicegroups           HDFS
-        check_command           check_namenodes_ha!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      5
-}
-{% endif %}
-
-# AMBARI AGENT Checks
-{% for hostname in all_hosts %}
-define service {
-        host_name	        {{ hostname }}
-        use                     hadoop-service
-        service_description     AMBARI::Ambari Agent process
-        servicegroups           AMBARI
-        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% endfor %}
-
-# NAGIOS SERVER ZOOKEEPER Checks
-{% if hostgroup_defs['zookeeper-servers'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
-        servicegroups           ZOOKEEPER
-        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-# NAGIOS SERVER HBASE Checks
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HBASE::Percent RegionServers live
-        servicegroups           HBASE
-        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-
-
-# GANGLIA SERVER Checks
-{% if hostgroup_defs['ganglia-server'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Server process
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for NameNode
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-
-{% if hostgroup_defs['jobtracker'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for JobTracker
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HBase Master
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['resourcemanager'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-define service {
-        hostgroup_name          ganglia-server
-        use                     hadoop-service
-        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
-        servicegroups           GANGLIA
-        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
-        normal_check_interval   0.25
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endif %}
-
-{% endif %}
-
-{% if hostgroup_defs['snamenode'] %}
-# Secondary namenode checks
-define service {
-        hostgroup_name          snamenode
-        use                     hadoop-service
-        service_description     NAMENODE::Secondary NameNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['namenode'] %}
-# HDFS Checks
-{%  for namenode_hostname in namenode_host %}
-{# TODO: check if we can get rid of str, lower #}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if env.system.platform != "suse" %}
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_webui!namenode!{{ namenode_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-
-define service {
-        host_name               {{ namenode_hostname }}
-        use                     hadoop-service
-        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
-        servicegroups           HDFS
-        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      5
-}
-
-{%  endfor  %}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Blocks health
-        servicegroups           HDFS
-        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   2
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::HDFS capacity utilization
-        servicegroups           HDFS
-        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   10
-        retry_check_interval    1 
-        max_check_attempts      1
-}
-
-{% endif %}
-
-# MAPREDUCE Checks
-{# On HDP1 here are jobtracker and tasktracker alerts #}
-
-{% if hostgroup_defs['resourcemanager'] %}
-# YARN::RESOURCEMANAGER Checks 
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager Web UI
-        servicegroups           YARN
-        check_command           check_webui!resourcemanager!{{ rm_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.platform != "suse" %}
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
-        servicegroups           YARN
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{% endif %}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager RPC latency
-        servicegroups           YARN
-        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          resourcemanager
-        use                     hadoop-service
-        service_description     RESOURCEMANAGER::ResourceManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['nodemanagers'] %}
-# YARN::NODEMANAGER Checks
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager process
-        servicegroups           YARN
-        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          nodemanagers
-        use                     hadoop-service
-        service_description     NODEMANAGER::NodeManager health
-        servicegroups           YARN
-        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     NODEMANAGER::Percent NodeManagers live
-        servicegroups           YARN
-        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{%  endif %}
-
-{% if hostgroup_defs['historyserver2'] %}
-# MAPREDUCE::JOBHISTORY Checks
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer Web UI
-        servicegroups           MAPREDUCE
-        check_command           check_webui!historyserver2!{{ hs_port }}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-
-{% if env.system.platform != "suse" %}
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer CPU utilization
-        servicegroups           MAPREDUCE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer RPC latency
-        servicegroups           MAPREDUCE
-        check_command           check_rpcq_latency!JobHistoryServer!{{ hs_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1 
-        max_check_attempts      5
-}
-
-define service {
-        hostgroup_name          historyserver2
-        use                     hadoop-service
-        service_description     JOBHISTORY::HistoryServer process
-        servicegroups           MAPREDUCE
-        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{%  endif %}
-
-{% if hostgroup_defs['journalnodes'] %}
-# Journalnode checks
-define service {
-        hostgroup_name          journalnodes
-        use                     hadoop-service
-        service_description     JOURNALNODE::JournalNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{% if dfs_ha_enabled %}
-define service {
-        hostgroup_name          nagios-server
-        use                     hadoop-service
-        service_description     HDFS::Percent JournalNodes live
-        servicegroups           HDFS
-        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      3
-}
-{% endif %}
-{% endif %}
-
-{% if hostgroup_defs['slaves'] %}
-# HDFS::DATANODE Checks
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode process
-        servicegroups           HDFS
-        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-define service {
-        hostgroup_name          slaves
-        use                     hadoop-service
-        service_description     DATANODE::DataNode space
-        servicegroups           HDFS
-        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
-        normal_check_interval   5
-        retry_check_interval    1
-        max_check_attempts      2
-}
-
-{% endif %}
-
-{% if hostgroup_defs['flume-servers'] %}
-# FLUME Checks
-define service {
-        hostgroup_name          flume-servers
-        use                     hadoop-service
-        service_description     FLUME::Flume Agent process
-        servicegroups           FLUME
-        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-
-{% if hostgroup_defs['zookeeper-servers'] %}
-# ZOOKEEPER Checks
-define service {
-        hostgroup_name          zookeeper-servers
-        use                     hadoop-service
-        service_description     ZOOKEEPER::ZooKeeper Server process
-        servicegroups           ZOOKEEPER
-        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hbasemasters'] %}
-# HBASE::REGIONSERVER Checks
-define service {
-        hostgroup_name          region-servers
-        use                     hadoop-service
-        service_description     REGIONSERVER::RegionServer process
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-
-{# HBASE:: MASTER Checks
-# define service {
-#         hostgroup_name          hbasemasters
-#         use                     hadoop-service
-#         service_description     HBASEMASTER::HBase Master Web UI
-#         servicegroups           HBASE
-#         check_command           check_webui!hbase!{{ hbase_master_port }}
-#         normal_check_interval   1
-#         retry_check_interval    1
-#         max_check_attempts      3
-# #}
-{%  for hbasemaster in hbase_master_hosts  %}
-{% if env.system.platform != "suse" %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_cpu!200%!250%
-        normal_check_interval   5
-        retry_check_interval    2 
-        max_check_attempts      5
-}
-{%  endif %}
-define service {
-        host_name               {{ hbasemaster }}
-        use                     hadoop-service
-        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
-        servicegroups           HBASE
-        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
-        normal_check_interval   0.5
-        retry_check_interval    0.25
-        max_check_attempts      4
-}
-{% endfor %}
-{% endif %}
-
-{% if hostgroup_defs['hiveserver'] %}
-# HIVE Metastore check
-define service {
-        hostgroup_name          hiveserver
-        use                     hadoop-service
-        service_description     HIVE-METASTORE::Hive Metastore status
-        servicegroups           HIVE-METASTORE
-        {% if security_enabled %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   0.5
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['oozie-server'] %}
-# Oozie check
-define service {
-        hostgroup_name          oozie-server
-        use                     hadoop-service
-        service_description     OOZIE::Oozie Server status
-        servicegroups           OOZIE
-        {% if security_enabled %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    1
-        max_check_attempts      3
-}
-{% endif %}
-{% if hostgroup_defs['webhcat-server'] %}
-# WEBHCAT check
-define service {
-        hostgroup_name          webhcat-server
-        use                     hadoop-service
-        service_description     WEBHCAT::WebHCat Server status
-        servicegroups           WEBHCAT 
-        {% if security_enabled %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
-        {% else %}
-        check_command           check_templeton_status!{{ templeton_port }}!v1!false
-        {% endif %}
-        normal_check_interval   1
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-
-{% if hostgroup_defs['hue-server'] %}
-define service {
-        hostgroup_name          hue-server
-        use                     hadoop-service
-        service_description     HUE::Hue Server status
-        servicegroups           HUE
-        check_command           check_hue_status
-        normal_check_interval   100
-        retry_check_interval    0.5
-        max_check_attempts      3
-}
-{% endif %}
-


[31/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2
new file mode 100644
index 0000000..acb2522
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.cfg.j2
@@ -0,0 +1,1349 @@
+##############################################################################
+#
+# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
+#
+# Read the documentation for more information on this configuration
+# file.  I've provided some comments here, but things may not be so
+# clear without further explanation.
+#
+# Last Modified: 12-14-2008
+#
+##############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+# LOG FILE
+# This is the main log file where service and host events are logged
+# for historical purposes.  This should be the first option specified 
+# in the config file!!!
+
+log_file=/var/log/nagios/nagios.log
+
+
+
+# OBJECT CONFIGURATION FILE(S)
+# These are the object configuration files in which you define hosts,
+# host groups, contacts, contact groups, services, etc.
+# You can split your object definitions across several config files
+# if you wish (as shown below), or keep them all in a single config file.
+
+# You can specify individual object config files as shown below:
+cfg_file=/etc/nagios/objects/commands.cfg
+cfg_file=/etc/nagios/objects/contacts.cfg
+cfg_file=/etc/nagios/objects/timeperiods.cfg
+cfg_file=/etc/nagios/objects/templates.cfg
+
+# Definitions for monitoring the local (Linux) host
+#cfg_file=/etc/nagios/objects/localhost.cfg
+
+# Definitions for monitoring a Windows machine
+#cfg_file=/etc/nagios/objects/windows.cfg
+
+# Definitions for monitoring a router/switch
+#cfg_file=/etc/nagios/objects/switch.cfg
+
+# Definitions for monitoring a network printer
+#cfg_file=/etc/nagios/objects/printer.cfg
+
+# Definitions for hadoop servers
+cfg_file={{nagios_host_cfg}}
+cfg_file={{nagios_hostgroup_cfg}}
+cfg_file={{nagios_servicegroup_cfg}}
+cfg_file={{nagios_service_cfg}}
+cfg_file={{nagios_command_cfg}}
+
+
+# You can also tell Nagios to process all config files (with a .cfg
+# extension) in a particular directory by using the cfg_dir
+# directive as shown below:
+
+#cfg_dir=/etc/nagios/servers
+#cfg_dir=/etc/nagios/printers
+#cfg_dir=/etc/nagios/switches
+#cfg_dir=/etc/nagios/routers
+
+
+
+
+# OBJECT CACHE FILE
+# This option determines where object definitions are cached when
+# Nagios starts/restarts.  The CGIs read object definitions from 
+# this cache file (rather than looking at the object config files
+# directly) in order to prevent inconsistencies that can occur
+# when the config files are modified after Nagios starts.
+
+object_cache_file=/var/nagios/objects.cache
+
+
+
+# PRE-CACHED OBJECT FILE
+# This option determines the location of the precached object file.
+# If you run Nagios with the -p command line option, it will preprocess
+# your object configuration file(s) and write the cached config to this
+# file.  You can then start Nagios with the -u option to have it read
+# object definitions from this precached file, rather than the standard
+# object configuration files (see the cfg_file and cfg_dir options above).
+# Using a precached object file can speed up the time needed to (re)start 
+# the Nagios process if you've got a large and/or complex configuration.
+# Read the documentation section on optimizing Nagios to find out more
+# about how this feature works.
+
+precached_object_file=/var/nagios/objects.precache
+
+
+
+# RESOURCE FILE
+# This is an optional resource file that contains $USERx$ macro
+# definitions. Multiple resource files can be specified by using
+# multiple resource_file definitions.  The CGIs will not attempt to
+# read the contents of resource files, so information that is
+# considered to be sensitive (usernames, passwords, etc) can be
+# defined as macros in this file and restrictive permissions (600)
+# can be placed on this file.
+
+resource_file={{nagios_resource_cfg}}
+
+
+
+# STATUS FILE
+# This is where the current status of all monitored services and
+# hosts is stored.  Its contents are read and processed by the CGIs.
+# The contents of the status file are deleted every time Nagios
+#  restarts.
+
+status_file=/var/nagios/status.dat
+
+
+
+# STATUS FILE UPDATE INTERVAL
+# This option determines the frequency (in seconds) that
+# Nagios will periodically dump program, host, and 
+# service status data.
+
+status_update_interval=10
+
+
+
+# NAGIOS USER
+# This determines the effective user that Nagios should run as.  
+# You can either supply a username or a UID.
+
+nagios_user={{nagios_user}}
+
+
+
+# NAGIOS GROUP
+# This determines the effective group that Nagios should run as.  
+# You can either supply a group name or a GID.
+
+nagios_group={{nagios_group}}
+
+
+
+# EXTERNAL COMMAND OPTION
+# This option allows you to specify whether or not Nagios should check
+# for external commands (in the command file defined below).  By default
+# Nagios will *not* check for external commands, just to be on the
+# cautious side.  If you want to be able to use the CGI command interface
+# you will have to enable this.
+# Values: 0 = disable commands, 1 = enable commands
+
+check_external_commands=1
+
+
+
+# EXTERNAL COMMAND CHECK INTERVAL
+# This is the interval at which Nagios should check for external commands.
+# This value works off the interval_length you specify later.  If you leave
+# that at its default value of 60 (seconds), a value of 1 here will cause
+# Nagios to check for external commands every minute.  If you specify a
+# number followed by an "s" (i.e. 15s), this will be interpreted to mean
+# actual seconds rather than a multiple of the interval_length variable.
+# Note: In addition to reading the external command file at regularly 
+# scheduled intervals, Nagios will also check for external commands after
+# event handlers are executed.
+# NOTE: Setting this value to -1 causes Nagios to check the external
+# command file as often as possible.
+
+#command_check_interval=15s
+command_check_interval=-1
+
+
+
+# EXTERNAL COMMAND FILE
+# This is the file that Nagios checks for external command requests.
+# It is also where the command CGI will write commands that are submitted
+# by users, so it must be writeable by the user that the web server
+# is running as (usually 'nobody').  Permissions should be set at the 
+# directory level instead of on the file, as the file is deleted every
+# time its contents are processed.
+
+command_file=/var/nagios/rw/nagios.cmd
+
+
+
+# EXTERNAL COMMAND BUFFER SLOTS
+# This setting is used to tweak the number of items or "slots" that
+# the Nagios daemon should allocate to the buffer that holds incoming 
+# external commands before they are processed.  As external commands 
+# are processed by the daemon, they are removed from the buffer.  
+
+external_command_buffer_slots=4096
+
+
+
+# LOCK FILE
+# This is the lockfile that Nagios will use to store its PID number
+# in when it is running in daemon mode.
+
+lock_file={{nagios_pid_file}}
+
+
+
+# TEMP FILE
+# This is a temporary file that is used as scratch space when Nagios
+# updates the status log, cleans the comment file, etc.  This file
+# is created, used, and deleted throughout the time that Nagios is
+# running.
+
+temp_file=/var/nagios/nagios.tmp
+
+
+
+# TEMP PATH
+# This is path where Nagios can create temp files for service and
+# host check results, etc.
+
+temp_path=/tmp
+
+
+
+# EVENT BROKER OPTIONS
+# Controls what (if any) data gets sent to the event broker.
+# Values:  0      = Broker nothing
+#         -1      = Broker everything
+#         <other> = See documentation
+
+event_broker_options=-1
+
+
+
+# EVENT BROKER MODULE(S)
+# This directive is used to specify an event broker module that should
+# be loaded by Nagios at startup.  Use multiple directives if you want
+# to load more than one module.  Arguments that should be passed to
+# the module at startup are separated from the module path by a space.
+#
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
+#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+#
+# Do NOT overwrite modules while they are being used by Nagios or Nagios
+# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
+# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
+#
+# The correct/safe way of updating a module is by using one of these methods:
+#    1. Shutdown Nagios, replace the module file, restart Nagios
+#    2. Delete the original module file, move the new module file into place, restart Nagios
+#
+# Example:
+#
+#   broker_module=<modulepath> [moduleargs]
+
+#broker_module=/somewhere/module1.o
+#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
+
+
+
+# LOG ROTATION METHOD
+# This is the log rotation method that Nagios should use to rotate
+# the main log file. Values are as follows..
+#	n	= None - don't rotate the log
+#	h	= Hourly rotation (top of the hour)
+#	d	= Daily rotation (midnight every day)
+#	w	= Weekly rotation (midnight on Saturday evening)
+#	m	= Monthly rotation (midnight last day of month)
+
+log_rotation_method=d
+
+
+
+# LOG ARCHIVE PATH
+# This is the directory where archived (rotated) log files should be 
+# placed (assuming you've chosen to do log rotation).
+
+log_archive_path=/var/log/nagios/archives
+
+
+
+# LOGGING OPTIONS
+# If you want messages logged to the syslog facility, as well as the
+# Nagios log file set this option to 1.  If not, set it to 0.
+
+use_syslog=1
+
+
+
+# NOTIFICATION LOGGING OPTION
+# If you don't want notifications to be logged, set this value to 0.
+# If notifications should be logged, set the value to 1.
+
+log_notifications=1
+
+
+
+# SERVICE RETRY LOGGING OPTION
+# If you don't want service check retries to be logged, set this value
+# to 0.  If retries should be logged, set the value to 1.
+
+log_service_retries=1
+
+
+
+# HOST RETRY LOGGING OPTION
+# If you don't want host check retries to be logged, set this value to
+# 0.  If retries should be logged, set the value to 1.
+
+log_host_retries=1
+
+
+
+# EVENT HANDLER LOGGING OPTION
+# If you don't want host and service event handlers to be logged, set
+# this value to 0.  If event handlers should be logged, set the value
+# to 1.
+
+log_event_handlers=1
+
+
+
+# INITIAL STATES LOGGING OPTION
+# If you want Nagios to log all initial host and service states to
+# the main log file (the first time the service or host is checked)
+# you can enable this option by setting this value to 1.  If you
+# are not using an external application that does long term state
+# statistics reporting, you do not need to enable this option.  In
+# this case, set the value to 0.
+
+log_initial_states=0
+
+
+
+# EXTERNAL COMMANDS LOGGING OPTION
+# If you don't want Nagios to log external commands, set this value
+# to 0.  If external commands should be logged, set this value to 1.
+# Note: This option does not include logging of passive service
+# checks - see the option below for controlling whether or not
+# passive checks are logged.
+
+log_external_commands=1
+
+
+
+# PASSIVE CHECKS LOGGING OPTION
+# If you don't want Nagios to log passive host and service checks, set
+# this value to 0.  If passive checks should be logged, set
+# this value to 1.
+
+log_passive_checks=1
+
+
+
+# GLOBAL HOST AND SERVICE EVENT HANDLERS
+# These options allow you to specify a host and service event handler
+# command that is to be run for every host or service state change.
+# The global event handler is executed immediately prior to the event
+# handler that you have optionally specified in each host or
+# service definition. The command argument is the short name of a
+# command definition that you define in your host configuration file.
+# Read the HTML docs for more information.
+
+#global_host_event_handler=somecommand
+#global_service_event_handler=somecommand
+
+
+
+# SERVICE INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" service checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all service checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!  This is not a
+# good thing for production, but is useful when testing the
+# parallelization functionality.
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+service_inter_check_delay_method=s
+
+
+
+# MAXIMUM SERVICE CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all services should
+# be completed.  Default is 30 minutes.
+
+max_service_check_spread=30
+
+
+
+# SERVICE CHECK INTERLEAVE FACTOR
+# This variable determines how service checks are interleaved.
+# Interleaving the service checks allows for a more even
+# distribution of service checks and reduced load on remote
+# hosts.  Setting this value to 1 is equivalent to how versions
+# of Nagios previous to 0.0.5 did service checks.  Set this
+# value to s (smart) for automatic calculation of the interleave
+# factor unless you have a specific reason to change it.
+#       s       = Use "smart" interleave factor calculation
+#       x       = Use an interleave factor of x, where x is a
+#                 number greater than or equal to 1.
+
+service_interleave_factor=s
+
+
+
+# HOST INTER-CHECK DELAY METHOD
+# This is the method that Nagios should use when initially
+# "spreading out" host checks when it starts monitoring.  The
+# default is to use smart delay calculation, which will try to
+# space all host checks out evenly to minimize CPU load.
+# Using the dumb setting will cause all checks to be scheduled
+# at the same time (with no delay between them)!
+#	n	= None - don't use any delay between checks
+#	d	= Use a "dumb" delay of 1 second between checks
+#	s	= Use "smart" inter-check delay calculation
+#       x.xx    = Use an inter-check delay of x.xx seconds
+
+host_inter_check_delay_method=s
+
+
+
+# MAXIMUM HOST CHECK SPREAD
+# This variable determines the timeframe (in minutes) from the
+# program start time that an initial check of all hosts should
+# be completed.  Default is 30 minutes.
+
+max_host_check_spread=30
+
+
+
+# MAXIMUM CONCURRENT SERVICE CHECKS
+# This option allows you to specify the maximum number of 
+# service checks that can be run in parallel at any given time.
+# Specifying a value of 1 for this variable essentially prevents
+# any service checks from being parallelized.  A value of 0
+# will not restrict the number of concurrent checks that are
+# being executed.
+
+max_concurrent_checks=0
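+# Example (illustrative only, not part of the default configuration): to cap
+# the scheduler at 30 parallel service checks you could instead use
+#max_concurrent_checks=30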
+
+
+
+# HOST AND SERVICE CHECK REAPER FREQUENCY
+# This is the frequency (in seconds!) that Nagios will process
+# the results of host and service checks.
+
+check_result_reaper_frequency=10
+
+
+
+
+# MAX CHECK RESULT REAPER TIME
+# This is the max amount of time (in seconds) that  a single
+# check result reaper event will be allowed to run before 
+# returning control back to Nagios so it can perform other
+# duties.
+
+max_check_result_reaper_time=30
+
+
+
+
+# CHECK RESULT PATH
+# This is the directory where Nagios stores the results of host and
+# service checks that have not yet been processed.
+#
+# Note: Make sure that only one instance of Nagios has access
+# to this directory!  
+
+check_result_path=/var/nagios/spool/checkresults
+
+
+
+
+# MAX CHECK RESULT FILE AGE
+# This option determines the maximum age (in seconds) for which check
+# result files are considered to be valid.  Files older than this 
+# threshold will be mercilessly deleted without further processing.
+
+max_check_result_file_age=3600
+
+
+
+
+# CACHED HOST CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous host check is considered current.
+# Cached host states (from host checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to the host check logic.
+# Too high of a value for this option may result in inaccurate host
+# states being used by Nagios, while a lower value may result in a
+# performance hit for host checks.  Use a value of 0 to disable host
+# check caching.
+
+cached_host_check_horizon=15
+
+
+
+# CACHED SERVICE CHECK HORIZON
+# This option determines the maximum amount of time (in seconds)
+# that the state of a previous service check is considered current.
+# Cached service states (from service checks that were performed more
+# recently than the timeframe specified by this value) can immensely
+# improve performance in regards to predictive dependency checks.
+# Use a value of 0 to disable service check caching.
+
+cached_service_check_horizon=15
+
+
+
+# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of hosts when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# host dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_host_dependency_checks=1
+
+
+
+# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
+# This option determines whether or not Nagios will attempt to execute
+# checks of services when it predicts that future dependency logic tests
+# may be needed.  These predictive checks can help ensure that your
+# service dependency logic works well.
+# Values:
+#  0 = Disable predictive checks
+#  1 = Enable predictive checks (default)
+
+enable_predictive_service_dependency_checks=1
+
+
+
+# SOFT STATE DEPENDENCIES
+# This option determines whether or not Nagios will use soft state 
+# information when checking host and service dependencies. Normally 
+# Nagios will only use the latest hard host or service state when 
+# checking dependencies. If you want it to use the latest state (regardless
+# of whether it's a soft or hard state type), enable this option.
+# Values:
+#  0 = Don't use soft state dependencies (default) 
+#  1 = Use soft state dependencies 
+
+soft_state_dependencies=0
+
+
+
+# TIME CHANGE ADJUSTMENT THRESHOLDS
+# These options determine when Nagios will react to detected changes
+# in system time (either forward or backwards).
+
+#time_change_threshold=900
+
+
+
+# AUTO-RESCHEDULING OPTION
+# This option determines whether or not Nagios will attempt to
+# automatically reschedule active host and service checks to
+# "smooth" them out over time.  This can help balance the load on
+# the monitoring server.  
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_reschedule_checks=0
+
+
+
+# AUTO-RESCHEDULING INTERVAL
+# This option determines how often (in seconds) Nagios will
+# attempt to automatically reschedule checks.  This option only
+# has an effect if the auto_reschedule_checks option is enabled.
+# Default is 30 seconds.
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_interval=30
+
+
+
+# AUTO-RESCHEDULING WINDOW
+# This option determines the "window" of time (in seconds) that
+# Nagios will look at when automatically rescheduling checks.
+# Only host and service checks that occur in the next X seconds
+# (determined by this variable) will be rescheduled. This option
+# only has an effect if the auto_reschedule_checks option is
+# enabled.  Default is 180 seconds (3 minutes).
+# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
+# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
+
+auto_rescheduling_window=180
+
+
+
+# SLEEP TIME
+# This is the number of seconds to sleep between checking for system
+# events and service checks that need to be run.
+
+sleep_time=0.25
+
+
+
+# TIMEOUT VALUES
+# These options control how much time Nagios will allow various
+# types of commands to execute before killing them off.  Options
+# are available for controlling maximum time allotted for
+# service checks, host checks, event handlers, notifications, the
+# ocsp command, and performance data commands.  All values are in
+# seconds.
+
+service_check_timeout=60
+host_check_timeout=30
+event_handler_timeout=30
+notification_timeout=30
+ocsp_timeout=5
+perfdata_timeout=5
+
+
+
+# RETAIN STATE INFORMATION
+# This setting determines whether or not Nagios will save state
+# information for services and hosts before it shuts down.  Upon
+# startup Nagios will reload all saved service and host state
+# information before starting to monitor.  This is useful for 
+# maintaining long-term data on state statistics, etc, but will
+# slow Nagios down a bit when it (re)starts.  Since it's only
+# a one-time penalty, I think it's well worth the additional
+# startup delay.
+
+retain_state_information=1
+
+
+
+# STATE RETENTION FILE
+# This is the file that Nagios should use to store host and
+# service state information before it shuts down.  The state 
+# information in this file is also read immediately prior to
+# starting to monitor the network when Nagios is restarted.
+# This file is used only if the retain_state_information
+# variable is set to 1.
+
+state_retention_file=/var/nagios/retention.dat
+
+
+
+# RETENTION DATA UPDATE INTERVAL
+# This setting determines how often (in minutes) that Nagios
+# will automatically save retention data during normal operation.
+# If you set this value to 0, Nagios will not save retention
+# data at regular intervals, but it will still save retention
+# data before shutting down or restarting.  If you have disabled
+# state retention, this option has no effect.
+
+retention_update_interval=60
+
+
+
+# USE RETAINED PROGRAM STATE
+# This setting determines whether or not Nagios will set 
+# program status variables based on the values saved in the
+# retention file.  If you want to use retained program status
+# information, set this value to 1.  If not, set this value
+# to 0.
+
+use_retained_program_state=1
+
+
+
+# USE RETAINED SCHEDULING INFO
+# This setting determines whether or not Nagios will retain
+# the scheduling info (next check time) for hosts and services
+# based on the values saved in the retention file.  If you
+# want to use retained scheduling info, set this
+# value to 1.  If not, set this value to 0.
+
+use_retained_scheduling_info=1
+
+
+
+# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
+# The following variables are used to specify specific host and
+# service attributes that should *not* be retained by Nagios during
+# program restarts.
+#
+# The values of the masks are bitwise ANDs of values specified
+# by the "MODATTR_" definitions found in include/common.h.  
+# For example, if you do not want the current enabled/disabled state
+# of flap detection and event handlers for hosts to be retained, you
+# would use a value of 24 for the host attribute mask...
+# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
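+#
+# For example (illustrative only), to apply the mask value computed above
+# you could set:
+#retained_host_attribute_mask=24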
+
+# This mask determines what host attributes are not retained
+retained_host_attribute_mask=0
+
+# This mask determines what service attributes are not retained
+retained_service_attribute_mask=0
+
+# These two masks determine what process attributes are not retained.
+# There are two masks, because some process attributes have host and service
+# options.  For example, you can disable active host checks, but leave active
+# service checks enabled.
+retained_process_host_attribute_mask=0
+retained_process_service_attribute_mask=0
+
+# These two masks determine what contact attributes are not retained.
+# There are two masks, because some contact attributes have host and
+# service options.  For example, you can disable host notifications for
+# a contact, but leave service notifications enabled for them.
+retained_contact_host_attribute_mask=0
+retained_contact_service_attribute_mask=0
+
+
+
+# INTERVAL LENGTH
+# This is the seconds per unit interval as used in the
+# host/contact/service configuration files.  Setting this to 60 means
+# that each interval is one minute long (60 seconds).  Other settings
+# have not been tested much, so your mileage is likely to vary...
+
+interval_length=60
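+# Example (illustrative only): with interval_length=60, a service defined
+# with a check_interval of 5 is scheduled every 5 * 60 = 300 seconds.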
+
+
+
+# CHECK FOR UPDATES
+# This option determines whether Nagios will automatically check to
+# see if new updates (releases) are available.  It is recommended that you
+# enable this option to ensure that you stay on top of the latest critical
+# patches to Nagios.  Nagios is critical to you - make sure you keep it in
+# good shape.  Nagios will check once a day for new updates. Data collected
+# by Nagios Enterprises from the update check is processed in accordance 
+# with our privacy policy - see http://api.nagios.org for details.
+
+check_for_updates=1
+
+
+
+# BARE UPDATE CHECK
+# This option determines what data Nagios will send to api.nagios.org when
+# it checks for updates.  By default, Nagios will send information on the 
+# current version of Nagios you have installed, as well as an indicator as
+# to whether this was a new installation or not.  Nagios Enterprises uses
+# this data to determine the number of users running specific versions of
+# Nagios.  Enable this option if you do not want this information to be sent.
+
+bare_update_check=0
+
+
+
+# AGGRESSIVE HOST CHECKING OPTION
+# If you don't want to turn on aggressive host checking features, set
+# this value to 0 (the default).  Otherwise set this value to 1 to
+# enable the aggressive check option.  Read the docs for more info
+# on what aggressive host checking is or check out the source code in
+# base/checks.c
+
+use_aggressive_host_checking=0
+
+
+
+# SERVICE CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# service checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of service checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_service_checks=1
+
+
+
+# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# service checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_service_checks=1
+
+
+
+# HOST CHECK EXECUTION OPTION
+# This determines whether or not Nagios will actively execute
+# host checks when it initially starts.  If this option is 
+# disabled, checks are not actively made, but Nagios can still
+# receive and process passive check results that come in.  Unless
+# you're implementing redundant hosts or have a special need for
+# disabling the execution of host checks, leave this enabled!
+# Values: 1 = enable checks, 0 = disable checks
+
+execute_host_checks=1
+
+
+
+# PASSIVE HOST CHECK ACCEPTANCE OPTION
+# This determines whether or not Nagios will accept passive
+# host checks results when it initially (re)starts.
+# Values: 1 = accept passive checks, 0 = reject passive checks
+
+accept_passive_host_checks=1
+
+
+
+# NOTIFICATIONS OPTION
+# This determines whether or not Nagios will send out any host or
+# service notifications when it is initially (re)started.
+# Values: 1 = enable notifications, 0 = disable notifications
+
+enable_notifications=1
+
+
+
+# EVENT HANDLER USE OPTION
+# This determines whether or not Nagios will run any host or
+# service event handlers when it is initially (re)started.  Unless
+# you're implementing redundant hosts, leave this option enabled.
+# Values: 1 = enable event handlers, 0 = disable event handlers
+
+enable_event_handlers=1
+
+
+
+# PROCESS PERFORMANCE DATA OPTION
+# This determines whether or not Nagios will process performance
+# data returned from service and host checks.  If this option is
+# enabled, host performance data will be processed using the
+# host_perfdata_command (defined below) and service performance
+# data will be processed using the service_perfdata_command (also
+# defined below).  Read the HTML docs for more information on
+# performance data.
+# Values: 1 = process performance data, 0 = do not process performance data
+
+process_performance_data=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
+# These commands are run after every host and service check is
+# performed.  These commands are executed only if the
+# process_performance_data option (above) is set to 1.  The command
+# argument is the short name of a command definition that you 
+# define in your host configuration file.  Read the HTML docs for
+# more information on performance data.
+
+#host_perfdata_command=process-host-perfdata
+#service_perfdata_command=process-service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILES
+# These files are used to store host and service performance data.
+# Performance data is only written to these files if the
+# process_performance_data option (above) is set to 1.
+
+#host_perfdata_file=/tmp/host-perfdata
+#service_perfdata_file=/tmp/service-perfdata
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
+# These options determine what data is written (and how) to the
+# performance data files.  The templates may contain macros, special
+# characters (\t for tab, \r for carriage return, \n for newline)
+# and plain text.  A newline is automatically added after each write
+# to the performance data file.  Some examples of what you can do are
+# shown below.
+
+#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
+#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE MODES
+# This option determines whether or not the host and service
+# performance data files are opened in write ("w") or append ("a")
+# mode. If you want to use named pipes, you should use the special
+# pipe ("p") mode which avoid blocking at startup, otherwise you will
+# likely want the defult append ("a") mode.
+
+#host_perfdata_file_mode=a
+#service_perfdata_file_mode=a
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
+# These options determine how often (in seconds) the host and service
+# performance data files are processed using the commands defined
+# below.  A value of 0 indicates the files should not be periodically
+# processed.
+
+#host_perfdata_file_processing_interval=0
+#service_perfdata_file_processing_interval=0
+
+
+
+# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
+# These commands are used to periodically process the host and
+# service performance data files.  The interval at which the
+# processing occurs is determined by the options above.
+
+#host_perfdata_file_processing_command=process-host-perfdata-file
+#service_perfdata_file_processing_command=process-service-perfdata-file
+
+
+
+# OBSESS OVER SERVICE CHECKS OPTION
+# This determines whether or not Nagios will obsess over service
+# checks and run the ocsp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over services, 0 = do not obsess (default)
+
+obsess_over_services=0
+
+
+
+# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
+# This is the command that is run for every service check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_services option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ocsp_command=somecommand
+
+
+
+# OBSESS OVER HOST CHECKS OPTION
+# This determines whether or not Nagios will obsess over host
+# checks and run the ochp_command defined below.  Unless you're
+# planning on implementing distributed monitoring, do not enable
+# this option.  Read the HTML docs for more information on
+# implementing distributed monitoring.
+# Values: 1 = obsess over hosts, 0 = do not obsess (default)
+
+obsess_over_hosts=0
+
+
+
+# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
+# This is the command that is run for every host check that is
+# processed by Nagios.  This command is executed only if the
+# obsess_over_hosts option (above) is set to 1.  The command 
+# argument is the short name of a command definition that you
+# define in your host configuration file. Read the HTML docs for
+# more information on implementing distributed monitoring.
+
+#ochp_command=somecommand
+
+
+
+# TRANSLATE PASSIVE HOST CHECKS OPTION
+# This determines whether or not Nagios will translate
+# DOWN/UNREACHABLE passive host check results into their proper
+# state for this instance of Nagios.  This option is useful
+# if you have a distributed or failover monitoring setup.  In
+# these cases your other Nagios servers probably have a different
+# "view" of the network, with regards to the parent/child relationship
+# of hosts.  If a distributed monitoring server thinks a host
+# is DOWN, it may actually be UNREACHABLE from the point of
+# this Nagios instance.  Enabling this option will tell Nagios
+# to translate any DOWN or UNREACHABLE host states it receives
+# passively into the correct state from the view of this server.
+# Values: 1 = perform translation, 0 = do not translate (default)
+
+translate_passive_host_checks=0
+
+
+
+# PASSIVE HOST CHECKS ARE SOFT OPTION
+# This determines whether or not Nagios will treat passive host
+# checks as being HARD or SOFT.  By default, a passive host check
+# result will put a host into a HARD state type.  This can be changed
+# by enabling this option.
+# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
+
+passive_host_checks_are_soft=0
+
+
+
+# ORPHANED HOST/SERVICE CHECK OPTIONS
+# These options determine whether or not Nagios will periodically 
+# check for orphaned host and service checks.  Since service checks are
+# not rescheduled until the results of their previous execution 
+# instance are processed, there exists a possibility that some
+# checks may never get rescheduled.  A similar situation exists for
+# host checks, although the exact scheduling details differ a bit
+# from service checks.  Orphaned checks seem to be a rare
+# problem and should not happen under normal circumstances.
+# If you have problems with service checks never getting
+# rescheduled, make sure you have orphaned service checks enabled.
+# Values: 1 = enable checks, 0 = disable checks
+
+check_for_orphaned_services=1
+check_for_orphaned_hosts=1
+
+
+
+# SERVICE FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of service results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_service_freshness=1
+
+
+
+# SERVICE FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of service check results.  If you have
+# disabled service freshness checking, this option has no effect.
+
+service_freshness_check_interval=60
+
+
+
+# HOST FRESHNESS CHECK OPTION
+# This option determines whether or not Nagios will periodically
+# check the "freshness" of host results.  Enabling this option
+# is useful for ensuring passive checks are received in a timely
+# manner.
+# Values: 1 = enable freshness checking, 0 = disable freshness checking
+
+check_host_freshness=0
+
+
+
+# HOST FRESHNESS CHECK INTERVAL
+# This setting determines how often (in seconds) Nagios will
+# check the "freshness" of host check results.  If you have
+# disabled host freshness checking, this option has no effect.
+
+host_freshness_check_interval=60
+
+
+
+
+# ADDITIONAL FRESHNESS THRESHOLD LATENCY
+# This setting determines the number of seconds that Nagios
+# will add to any host and service freshness thresholds that
+# it calculates (those not explicitly specified by the user).
+
+additional_freshness_latency=15
+
+
+
+
+# FLAP DETECTION OPTION
+# This option determines whether or not Nagios will try
+# to detect hosts and services that are "flapping".
+# Flapping occurs when a host or service changes between
+# states too frequently.  When Nagios detects that a 
+# host or service is flapping, it will temporarily suppress
+# notifications for that host/service until it stops
+# flapping.  Flap detection is very experimental, so read
+# the HTML documentation before enabling this feature!
+# Values: 1 = enable flap detection
+#         0 = disable flap detection (default)
+
+enable_flap_detection=1
+
+
+
+# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
+# Read the HTML documentation on flap detection for
+# an explanation of what this option does.  This option
+# has no effect if flap detection is disabled.
+
+low_service_flap_threshold=5.0
+high_service_flap_threshold=20.0
+low_host_flap_threshold=5.0
+high_host_flap_threshold=20.0
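+# Example (illustrative only): with the service thresholds above, a service
+# starts being treated as flapping once its weighted percent state change
+# rises above 20.0%, and stops being treated as flapping once it falls
+# back below 5.0%.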
+
+
+
+# DATE FORMAT OPTION
+# This option determines how short dates are displayed. Valid options
+# include:
+#	us		(MM-DD-YYYY HH:MM:SS)
+#	euro    	(DD-MM-YYYY HH:MM:SS)
+#	iso8601		(YYYY-MM-DD HH:MM:SS)
+#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
+#
+
+date_format=us
+
+
+
+
+# TIMEZONE OFFSET
+# This option is used to override the default timezone that this
+# instance of Nagios runs in.  If not specified, Nagios will use
+# the system configured timezone.
+#
+# NOTE: In order to display the correct timezone in the CGIs, you
+# will also need to alter the Apache directives for the CGI path 
+# to include your timezone.  Example:
+#
+#   <Directory "/usr/local/nagios/sbin/">
+#      SetEnv TZ "Australia/Brisbane"
+#      ...
+#   </Directory>
+
+#use_timezone=US/Mountain
+#use_timezone=Australia/Brisbane
+
+
+
+
+# P1.PL FILE LOCATION
+# This value determines where the p1.pl perl script (used by the
+# embedded Perl interpreter) is located.  If you didn't compile
+# Nagios with embedded Perl support, this option has no effect.
+
+p1_file = {{nagios_p1_pl}}
+
+
+
+# EMBEDDED PERL INTERPRETER OPTION
+# This option determines whether or not the embedded Perl interpreter
+# will be enabled during runtime.  This option has no effect if Nagios
+# has not been compiled with support for embedded Perl.
+# Values: 0 = disable interpreter, 1 = enable interpreter
+
+enable_embedded_perl=1
+
+
+
+# EMBEDDED PERL USAGE OPTION
+# This option determines whether or not Nagios will process Perl plugins
+# and scripts with the embedded Perl interpreter if the plugins/scripts
+# do not explicitly indicate whether or not it is okay to do so. Read
+# the HTML documentation on the embedded Perl interpreter for more 
+# information on how this option works.
+
+use_embedded_perl_implicitly=1
+
+
+
+# ILLEGAL OBJECT NAME CHARACTERS
+# This option allows you to specify illegal characters that cannot
+# be used in host names, service descriptions, or names of other
+# object types.
+
+illegal_object_name_chars=`~!$%^&*|'"<>?,()=
+
+
+
+# ILLEGAL MACRO OUTPUT CHARACTERS
+# This option allows you to specify illegal characters that are
+# stripped from macros before being used in notifications, event
+# handlers, etc.  This DOES NOT affect macros used in service or
+# host check commands.
+# The following macros are stripped of the characters you specify:
+#	$HOSTOUTPUT$
+#	$HOSTPERFDATA$
+#	$HOSTACKAUTHOR$
+#	$HOSTACKCOMMENT$
+#	$SERVICEOUTPUT$
+#	$SERVICEPERFDATA$
+#	$SERVICEACKAUTHOR$
+#	$SERVICEACKCOMMENT$
+
+illegal_macro_output_chars=`~$&|'"<>
+
+
+
+# REGULAR EXPRESSION MATCHING
+# This option controls whether or not regular expression matching
+# takes place in the object config files.  Regular expression
+# matching is used to match host, hostgroup, service, and service
+# group names/descriptions in some fields of various object types.
+# Values: 1 = enable regexp matching, 0 = disable regexp matching
+
+use_regexp_matching=0
+
+
+
+# "TRUE" REGULAR EXPRESSION MATCHING
+# This option controls whether or not "true" regular expression 
+# matching takes place in the object config files.  This option
+# only has an effect if regular expression matching is enabled
+# (see above).  If this option is DISABLED, regular expression
+# matching only occurs if a string contains wildcard characters
+# (* and ?).  If the option is ENABLED, regexp matching occurs
+# all the time (which can be annoying).
+# Values: 1 = enable true matching, 0 = disable true matching
+
+use_true_regexp_matching=0
+
+
+
+# ADMINISTRATOR EMAIL/PAGER ADDRESSES
+# The email and pager address of a global administrator (likely you).
+# Nagios never uses these values itself, but you can access them by
+# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
+# commands.
+
+admin_email=nagios@localhost
+admin_pager=pagenagios@localhost
+
+
+
+# DAEMON CORE DUMP OPTION
+# This option determines whether or not Nagios is allowed to create
+# a core dump when it runs as a daemon.  Note that it is generally
+# considered bad form to allow this, but it may be useful for
+# debugging purposes.  Enabling this option doesn't guarantee that
+# a core file will be produced, but that's just life...
+# Values: 1 - Allow core dumps
+#         0 - Do not allow core dumps (default)
+
+daemon_dumps_core=0
+
+
+
+# LARGE INSTALLATION TWEAKS OPTION
+# This option determines whether or not Nagios will take some shortcuts
+# which can save on memory and CPU usage in large Nagios installations.
+# Read the documentation for more information on the benefits/tradeoffs
+# of enabling this option.
+# Values: 1 - Enable tweaks
+#         0 - Disable tweaks (default)
+
+use_large_installation_tweaks=0
+
+
+
+# ENABLE ENVIRONMENT MACROS
+# This option determines whether or not Nagios will make all standard
+# macros available as environment variables when host/service checks
+# and system commands (event handlers, notifications, etc.) are
+# executed.  Enabling this option can cause performance issues in 
+# large installations, as it will consume a bit more memory and (more
+# importantly) consume more CPU.
+# Values: 1 - Enable environment variable macros (default)
+#         0 - Disable environment variable macros
+
+enable_environment_macros=1
+
+
+
+# CHILD PROCESS MEMORY OPTION
+# This option determines whether or not Nagios will free memory in
+# child processes (processes used to execute system commands and host/
+# service checks).  If you specify a value here, it will override
+# program defaults.
+# Value: 1 - Free memory in child processes
+#        0 - Do not free memory in child processes
+
+#free_child_process_memory=1
+
+
+
+# CHILD PROCESS FORKING BEHAVIOR
+# This option determines how Nagios will fork child processes
+# (used to execute system commands and host/service checks).  Normally
+# child processes are fork()ed twice, which provides a very high level
+# of isolation from problems.  Fork()ing once is probably enough and will
+# save a great deal on CPU usage (in large installs), so you might
+# want to consider using this.  If you specify a value here, it will
+# override program defaults.
+# Value: 1 - Child processes fork() twice
+#        0 - Child processes fork() just once
+
+#child_processes_fork_twice=1
+
+
+
+# DEBUG LEVEL
+# This option determines how much (if any) debugging information will
+# be written to the debug file.  OR values together to log multiple
+# types of information.
+# Values: 
+#          -1 = Everything
+#          0 = Nothing
+#	   1 = Functions
+#          2 = Configuration
+#          4 = Process information
+#	   8 = Scheduled events
+#          16 = Host/service checks
+#          32 = Notifications
+#          64 = Event broker
+#          128 = External commands
+#          256 = Commands
+#          512 = Scheduled downtime
+#          1024 = Comments
+#          2048 = Macros
+
+debug_level=0
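+# Example (illustrative only): to log scheduled events (8) and
+# host/service checks (16), OR the values together:
+#debug_level=24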
+
+
+
+# DEBUG VERBOSITY
+# This option determines how verbose the debug log output will be.
+# Values: 0 = Brief output
+#         1 = More detailed
+#         2 = Very detailed
+
+debug_verbosity=1
+
+
+
+# DEBUG FILE
+# This option determines where Nagios should write debugging information.
+
+debug_file=/var/log/nagios/nagios.debug
+
+
+
+# MAX DEBUG FILE SIZE
+# This option determines the maximum size (in bytes) of the debug file.  If
+# the file grows larger than this size, it will be renamed with a .old
+# extension.  If a file already exists with a .old extension it will
+# automatically be deleted.  This helps ensure your disk space usage doesn't
+# get out of control when debugging Nagios.
+
+max_debug_file_size=1000000
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2
new file mode 100644
index 0000000..d8936a0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.conf.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+#
+# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
+# Last Modified: 11-26-2005
+#
+# This file contains examples of entries that need
+# to be incorporated into your Apache web server
+# configuration file.  Customize the paths, etc. as
+# needed to fit your system.
+#
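+#
+# Example (illustrative only): one common way to activate this snippet is to
+# include it from your Apache configuration, e.g.
+#   Include conf.d/nagios.conf
+# in httpd.conf, or to drop the file into /etc/httpd/conf.d/ (the exact
+# path may differ on your system).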
+
+ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
+
+<Directory "/usr/lib/nagios/cgi">
+#  SSLRequireSSL
+   Options ExecCGI
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+
+Alias /nagios "/usr/share/nagios"
+
+<Directory "/usr/share/nagios">
+#  SSLRequireSSL
+   Options None
+   AllowOverride None
+   Order allow,deny
+   Allow from all
+#  Order deny,allow
+#  Deny from all
+#  Allow from 127.0.0.1
+   AuthName "Nagios Access"
+   AuthType Basic
+   AuthUserFile /etc/nagios/htpasswd.users
+   Require valid-user
+</Directory>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2
new file mode 100644
index 0000000..01e21ac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/nagios.j2
@@ -0,0 +1,146 @@
+#!/bin/sh
+# $Id$
+# Nagios	Startup script for the Nagios monitoring daemon
+#
+# chkconfig:	- 85 15
+# description:	Nagios is a service monitoring system
+# processname: nagios
+# config: /etc/nagios/nagios.cfg
+# pidfile: /var/nagios/nagios.pid
+#
+### BEGIN INIT INFO
+# Provides:		nagios
+# Required-Start:	$local_fs $syslog $network
+# Required-Stop:	$local_fs $syslog $network
+# Short-Description:    start and stop Nagios monitoring server
+# Description:		Nagios is a service monitoring system
+### END INIT INFO
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+prefix="/usr"
+exec_prefix="/usr"
+exec="/usr/sbin/nagios"
+prog="nagios"
+config="/etc/nagios/nagios.cfg"
+pidfile="{{nagios_pid_file}}"
+user="{{nagios_user}}"
+
+[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
+
+lockfile=/var/lock/subsys/$prog
+
+start() {
+    [ -x $exec ] || exit 5
+    [ -f $config ] || exit 6
+    echo -n $"Starting $prog: "
+    daemon --user=$user $exec -d $config
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && touch $lockfile
+    return $retval
+}
+
+stop() {
+    echo -n $"Stopping $prog: "
+    killproc -d 10 $exec
+    retval=$?
+    echo
+    [ $retval -eq 0 ] && rm -f $lockfile
+    return $retval
+}
+
+
+restart() {
+    stop
+    start
+}
+
+reload() {
+    echo -n $"Reloading $prog: "
+    killproc $exec -HUP
+    RETVAL=$?
+    echo
+}
+
+force_reload() {
+    restart
+}
+
+check_config() {
+        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
+        RETVAL=$?
+        if [ $RETVAL -ne 0 ] ; then
+                echo -n $"Configuration validation failed"
+                failure
+                echo
+                exit 1
+
+        fi
+}
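+# Example (illustrative only): running "service nagios configtest" invokes
+# check_config via the case statement below and reports whether the
+# configuration in $config validates.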
+
+
+case "$1" in
+    start)
+        status $prog && exit 0
+	check_config
+        $1
+        ;;
+    stop)
+        status $prog|| exit 0
+        $1
+        ;;
+    restart)
+	check_config
+        $1
+        ;;
+    reload)
+        status $prog || exit 7
+	check_config
+        $1
+        ;;
+    force-reload)
+	check_config
+        force_reload
+        ;;
+    status)
+        status $prog
+        ;;
+    condrestart|try-restart)
+        status $prog|| exit 0
+	check_config
+        restart
+        ;;
+    configtest)
+        echo -n  $"Checking config for $prog: "
+        check_config && success
+        echo
+	;;
+    *)
+        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
+        exit 2
+esac
+exit $?

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2
new file mode 100644
index 0000000..920bfae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/resource.cfg.j2
@@ -0,0 +1,51 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+###########################################################################
+#
+# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
+#
+# Last Modified: 09-10-2003
+#
+# You can define $USERx$ macros in this file, which can in turn be used
+# in command definitions in your host config file(s).  $USERx$ macros are
+# useful for storing sensitive information such as usernames, passwords,
+# etc.  They are also handy for specifying the path to plugins and
+# event handlers - if you decide to move the plugins or event handlers to
+# a different directory in the future, you can just update one or two
+# $USERx$ macros, instead of modifying a lot of command definitions.
+#
+# The CGIs will not attempt to read the contents of resource files, so
+# you can set restrictive permissions (600 or 660) on them.
+#
+# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
+#
+# Resource files may also be used to store configuration directives for
+# external data sources like MySQL...
+#
+###########################################################################
+
+# Sets $USER1$ to be the path to the plugins
+$USER1$={{plugins_dir}}
+
+# Sets $USER2$ to be the path to event handlers
+#$USER2$={{eventhandlers_dir}}
+
+# Store some usernames and passwords (hidden from the CGIs)
+#$USER3$=someuser
+#$USER4$=somepassword
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh
new file mode 100644
index 0000000..2446544
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/oozieSmoke.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+function getValueFromField {
+  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
+  return $?
+}
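+# Example usage (illustrative only):
+#   getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name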
+
+function checkOozieJobStatus {
+  local job_id=$1
+  local num_of_tries=$2
+  #default num_of_tries to 10 if not present
+  num_of_tries=${num_of_tries:-10}
+  local i=0
+  local rc=1
+  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
+  su - ${smoke_test_user} -c "$cmd"
+  while [ $i -lt $num_of_tries ] ; do
+    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
+    (IFS='';echo $cmd_output)
+    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
+    echo "workflow_status=$act_status"
+    if [ "RUNNING" == "$act_status" ]; then
+      #increment the counter and get the status again after waiting for 15 secs
+      sleep 15
+      (( i++ ))
+    elif [ "SUCCEEDED" == "$act_status" ]; then
+      rc=0;
+      break;
+    else
+      rc=1
+      break;
+    fi
+  done
+  return $rc
+}
+
+export oozie_conf_dir=$1
+export hadoop_conf_dir=$2
+export smoke_test_user=$3
+export security_enabled=$4
+export smoke_user_keytab=$5
+export kinit_path_local=$6
+
+export OOZIE_EXIT_CODE=0
+export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/mapred-site.xml mapred.job.tracker`
+export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.default.name`
+export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
+export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
+cd $OOZIE_EXAMPLES_DIR
+
+tar -zxf oozie-examples.tar.gz
+sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
+sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else 
+  kinitcmd=""
+fi
+
+su - ${smoke_test_user} -c "hadoop dfs -rmr examples"
+su - ${smoke_test_user} -c "hadoop dfs -rmr input-data"
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
+
+cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
+job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
+job_id="`echo $job_info | cut -d':' -f2`"
+checkOozieJobStatus "$job_id"
+OOZIE_EXIT_CODE="$?"
+exit $OOZIE_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh
new file mode 100644
index 0000000..97a513c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/files/wrap_ooziedb.sh
@@ -0,0 +1,31 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
+EC=$?
+echo $OUT
+GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
+if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
+then
+  exit 0
+else
+  exit $EC
+fi  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py
new file mode 100644
index 0000000..91da7ae
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie.py
@@ -0,0 +1,99 @@
+from resource_management import *
+
+def oozie(is_server=False):
+  import params
+
+  XmlConfig( "oozie-site.xml",
+    conf_dir = params.conf_dir, 
+    configurations = params.config['configurations']['oozie-site'],
+    owner = params.oozie_user,
+    group = params.user_group,
+    mode = 0664
+  )
+  
+  Directory( params.conf_dir,
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
+    owner = params.oozie_user
+  )
+  
+  TemplateConfig( format("{conf_dir}/oozie-log4j.properties"),
+    owner = params.oozie_user
+  )
+
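+  # When Oozie is backed by MySQL or Oracle, download DBConnectionVerification.jar
+  # from the Ambari server (jdk_location) so the database connection can be
+  # verified before the server is started (see oozie_service.py).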
+  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
+    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
+     -o {check_db_connection_jar_name}'"),
+      not_if  = format("[ -f {check_db_connection_jar} ]")
+    )
+    
+  oozie_ownership( )
+  
+  if is_server:      
+    oozie_server_specific( )
+  
+def oozie_ownership():
+  import params
+  
+  File ( format("{conf_dir}/adminusers.txt"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/hadoop-config.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/oozie-default.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  Directory ( format("{conf_dir}/action-conf"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+
+  File ( format("{conf_dir}/action-conf/hive.xml"),
+    owner = params.oozie_user,
+    group = params.user_group
+  )
+  
+def oozie_server_specific():
+  import params
+  
+  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
+  Directory( oozie_server_directories,
+    owner = params.oozie_user,
+    mode = 0755,
+    recursive = True
+  )
+       
+  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
+  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
+  
+  # this is different for HDP2
+  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir}")
+  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
+    cmd3 += format(" && mkdir -p {oozie_libext_dir} && cp {jdbc_driver_jar} {oozie_libext_dir}")
+    
+  # this is different for HDP2
+  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh -hadoop 0.20.200 {hadoop_jar_location} -extjs {ext_js_path} {jar_option} {jar_path}")
+  
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  Execute( [cmd1, cmd2, cmd3],
+    not_if  = no_op_test
+  )
+  Execute( cmd4,
+    user = params.oozie_user,
+    not_if  = no_op_test
+  )
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py
new file mode 100644
index 0000000..23fdc12
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_client.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=False)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py
new file mode 100644
index 0000000..eca2a56
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_server.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from oozie import oozie
+from oozie_service import oozie_service
+
+         
+class OozieServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    oozie(is_server=True)
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    oozie_service(action='start')
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    oozie_service(action='stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  OozieServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py
new file mode 100644
index 0000000..1d8767c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/oozie_service.py
@@ -0,0 +1,45 @@
+from resource_management import *
+
+def oozie_service(action = 'start'): # 'start' or 'stop'
+  import params
+
+  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
+  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+  
+  if action == 'start':
+    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
+    
+    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
+    else:
+      db_connection_check_command = None
+      
+    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
+    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
+      
+    if db_connection_check_command:
+      Execute( db_connection_check_command)
+                  
+    Execute( cmd1,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+      ignore_failures = True
+    ) 
+    
+    Execute( cmd2,
+      user = params.oozie_user,       
+      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
+    )
+    
+    Execute( start_cmd,
+      user = params.oozie_user,
+      not_if  = no_op_test,
+    )
+  elif action == 'stop':
+    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
+    Execute( stop_cmd,
+      only_if  = no_op_test
+    )
+
+  
+  
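
The no_op_test guard above is the idempotency idiom used throughout these scripts: it exits 0 only when the pid file exists and the process it records is still alive, so passing it as not_if makes the start path a no-op for an already-running Oozie, while only_if makes the stop path run only when there is something to stop. A minimal sketch of the same pattern, reusing only the Execute/format calls that appear in this patch (daemon_start_cmd is a hypothetical placeholder, not part of the change):

    from resource_management import *
    import params

    # exits 0 only if the pid file exists and the pid it records is still running
    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")

    daemon_start_cmd = "/usr/lib/oozie/bin/oozie-start.sh"  # stand-in for a real start command

    Execute(daemon_start_cmd,
      user = params.oozie_user,
      not_if = no_op_test          # skip the start when the daemon is already up
    )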

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py
new file mode 100644
index 0000000..0466ad8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,64 @@
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+oozie_user = config['configurations']['global']['oozie_user']
+smokeuser = config['configurations']['global']['smokeuser']
+conf_dir = "/etc/oozie/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+user_group = config['configurations']['global']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+hadoop_prefix = "/usr"
+oozie_tmp_dir = "/var/tmp/oozie"
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+# for HDP2 it's "/usr/share/HDP-oozie/ext-2.2.zip"
+ext_js_path = "/usr/share/HDP-oozie/ext.zip"
+oozie_libext_dir = "/usr/lib/oozie/libext"
+lzo_enabled = config['configurations']['global']['lzo_enabled']
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
+oozie_keytab = config['configurations']['global']['oozie_keytab']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+java_share_dir = "/usr/share/java"
+
+java_home = config['hostLevelParams']['java_home']
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['global']['oozie_log_dir']
+oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_lib_dir = "/var/lib/oozie/"
+oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+
+if jdbc_driver_name == "com.mysql.jdbc.Driver":
+  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+else:
+  jdbc_driver_jar = ""
+  
+if lzo_enabled or jdbc_driver_name:
+  jar_option = "-jars"         
+else:
+  jar_option = ""
+  
+lzo_jar_suffix = "/usr/lib/hadoop/lib/hadoop-lzo-0.5.0.jar" if lzo_enabled else ""
+  
+if lzo_enabled and jdbc_driver_name:
+  jar_path = format("{lzo_jar_suffix}:{jdbc_driver_jar}")
+else:
+  jar_path = format("{lzo_jar_suffix}{jdbc_driver_jar}")
\ No newline at end of file
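
Two configuration access styles are mixed above: direct dictionary indexing (config['configurations'][...]), which presumes the property is present, and default("/path", fallback), which appears to return the fallback when the property is absent — hence its use for the optional JDBC settings. A small sketch of the distinction, using only property names already referenced in this file:

    from resource_management import *

    config = Script.get_config()

    # required property: direct indexing, a missing key is an error
    oozie_user = config['configurations']['global']['oozie_user']

    # optional property: default() falls back to "" if oozie-site does not define it
    jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")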

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py
new file mode 100644
index 0000000..7dbfc87
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,47 @@
+from resource_management import *
+
+class OozieServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    # on HDP2 this file is different
+    smoke_test_file_name = 'oozieSmoke.sh'
+
+    oozie_smoke_shell_file( smoke_test_file_name)
+  
+def oozie_smoke_shell_file(
+  file_name
+):
+  import params
+
+  File( format("/tmp/{file_name}"),
+    content = StaticFile(file_name),
+    mode = 0755
+  )
+  
+  if params.security_enabled:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
+  else:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
+
+  Execute( format("/tmp/{file_name}"),
+    command   = sh_cmd,
+    tries     = 3,
+    try_sleep = 5,
+    logoutput = True
+  )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServiceCheck().execute()
+  
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
+  #main()
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py
new file mode 100644
index 0000000..c44fcf4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2
new file mode 100644
index 0000000..270a1a8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-env.sh.j2
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#      http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+# export OOZIE_HTTP_PORT=11000
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-log4j.properties.j2
new file mode 100644
index 0000000..e4a2662
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/OOZIE/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh
new file mode 100644
index 0000000..a22456e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';


[09/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
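
The per-directory test above (ls $dir | wc -l | grep -q ^0$) simply asks whether a NameNode directory has zero entries before allowing a format; a missing directory passes the same pipeline. A purely illustrative Python equivalent of that check (not part of the script):

    import os

    # a NameNode data dir only counts as safe to format when it is missing or empty,
    # mirroring the ls | wc -l | grep pipeline above
    def is_safe_to_format(path):
      return not os.path.isdir(path) or len(os.listdir(path)) == 0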

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
new file mode 100644
index 0000000..f8e9c1a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+      
+
+if __name__ == "__main__":
+  main()
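
For reference, the HDFS service check further down stages this file to /tmp and runs it against the JournalNode hosts. A hypothetical standalone invocation (the host names and the 8480 port are made-up values) would look like:

    import subprocess

    # exit status 0 means every listed host answered HTTP 200 on "/"
    rc = subprocess.call(["python", "/tmp/checkWebUI.py",
                          "-m", "c6401.ambari.apache.org,c6402.ambari.apache.org",
                          "-p", "8480"])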

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..eaa27cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_datanode import datanode
+
+
+class DataNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    datanode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    datanode(action="stop")
+
+  def config(self, env):
+    import params
+
+    datanode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+
+if __name__ == "__main__":
+  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..ec24c7d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,52 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class HdfsClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def config(self, env):
+    import params
+
+    pass
+
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..e0b6c39
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_datanode.py
@@ -0,0 +1,56 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+def datanode(action=None):
+  import params
+
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              recursive=True,
+              mode=0750,
+              owner=params.hdfs_user,
+              group=params.user_group)
+    Directory(params.dfs_data_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+
+  if action == "start":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )
+  if action == "stop":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..8b29cc3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_namenode.py
@@ -0,0 +1,212 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+import urlparse
+
+
+def namenode(action=None, format=True):
+  import params
+  #we need this directory to be present before any action(HA manual steps for
+  #additional namenode)
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if format:
+      format_namenode()
+      pass
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+    service(
+      action="start", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      create_pid_dir=True,
+      create_log_dir=True,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+    # TODO: extract creating of dirs to different services
+    create_app_directories()
+    create_user_directories()
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+  if action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+
+def create_app_directories():
+  import params
+
+  hdfs_directory(name="/tmp",
+                 owner=params.hdfs_user,
+                 mode="777"
+  )
+  #mapred directories
+  if params.has_histroryserver:
+    hdfs_directory(name="/mapred",
+                   owner=params.mapred_user
+    )
+    hdfs_directory(name="/mapred/system",
+                   owner=params.hdfs_user
+    )
+    #hbase directories
+  if len(params.hbase_master_hosts) != 0:
+    hdfs_directory(name=params.hbase_hdfs_root_dir,
+                   owner=params.hbase_user
+    )
+    hdfs_directory(name=params.hbase_staging_dir,
+                   owner=params.hbase_user,
+                   mode="711"
+    )
+    #hive directories
+  if len(params.hive_server_host) != 0:
+    hdfs_directory(name=params.hive_apps_whs_dir,
+                   owner=params.hive_user,
+                   mode="777"
+    )
+  if len(params.hcat_server_hosts) != 0:
+    hdfs_directory(name=params.webhcat_apps_dir,
+                   owner=params.webhcat_user,
+                   mode="755"
+    )
+  if len(params.hs_host) != 0:
+    if params.yarn_log_aggregation_enabled:
+      hdfs_directory(name=params.yarn_nm_app_log_dir,
+                     owner=params.yarn_user,
+                     group=params.user_group,
+                     mode="777",
+                     recursive_chmod=True
+      )
+    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="1777"
+    )
+
+  if params.has_falcon_host:
+    if params.falcon_store_uri[0:4] == "hdfs":
+      hdfs_directory(name=params.falcon_store_uri,
+                     owner=params.falcon_user,
+                     mode="755"
+      )
+
+def create_user_directories():
+  import params
+
+  hdfs_directory(name=params.smoke_hdfs_user_dir,
+                 owner=params.smoke_user,
+                 mode=params.smoke_hdfs_user_mode
+  )
+
+  if params.has_hive_server_host:
+    hdfs_directory(name=params.hive_hdfs_user_dir,
+                   owner=params.hive_user,
+                   mode=params.hive_hdfs_user_mode
+    )
+
+  if params.has_hcat_server_host:
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      hdfs_directory(name=params.hcat_hdfs_user_dir,
+                     owner=params.hcat_user,
+                     mode=params.hcat_hdfs_user_mode
+      )
+    hdfs_directory(name=params.webhcat_hdfs_user_dir,
+                   owner=params.webhcat_user,
+                   mode=params.webhcat_hdfs_user_mode
+    )
+
+  if params.has_oozie_server:
+    hdfs_directory(name=params.oozie_hdfs_user_dir,
+                   owner=params.oozie_user,
+                   mode=params.oozie_hdfs_user_mode
+    )
+
+
+def format_namenode(force=None):
+  import params
+
+  mark_dir = params.namenode_formatted_mark_dir
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if not params.dfs_ha_enabled:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    kinit_override=True)
+    else:
+      File('/tmp/checkForFormat.sh',
+           content=StaticFile("checkForFormat.sh"),
+           mode=0755)
+      Execute(format(
+        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
+        "{dfs_name_dir}"),
+              not_if=format("test -d {mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
+    Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+  user_group = params.user_group
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=user_group
+  )
+
+  ExecuteHadoop('dfsadmin -refreshNodes',
+                user=hdfs_user,
+                conf_dir=conf_dir,
+                kinit_override=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..a943455
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    Directory(params.fs_checkpoint_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+  elif action == "start":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )
+  elif action == "stop":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
new file mode 100644
index 0000000..fd355cc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/journalnode.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class JournalNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    service(
+      action="start", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="journalnode", user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_journalnode_keytab_file,
+      principal=params.dfs_journalnode_kerberos_principal
+    )
+
+  def config(self, env):
+    import params
+
+    Directory(params.jn_edits_dir,
+              recursive=True,
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.journalnode_pid_file)
+
+
+if __name__ == "__main__":
+  JournalNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..deb01d5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,68 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_namenode import namenode
+
+
+class NameNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    #TODO remove when config action will be implemented
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    namenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="configure")
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="decommission")
+    pass
+
+if __name__ == "__main__":
+  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
new file mode 100644
index 0000000..685e25f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/params.py
@@ -0,0 +1,188 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+import os
+
+config = Script.get_config()
+
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+falcon_user = config['configurations']['global']['falcon_user']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_histroryserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(hagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+has_falcon_host = not len(falcon_host)  == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = status_params.hdfs_user
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group = "users"
+
+#hadoop params
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+hadoop_bin = "/usr/lib/hadoop/sbin"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+
+dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
+
+# if stack_version[0] == "2":
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+# else:
+#   dfs_name_dir = default("/configurations/hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
+hbase_staging_dir = "/apps/hbase/staging"
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
+webhcat_apps_dir = "/apps/webhcat"
+yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']#","true")
+yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']#","/app-logs")
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
+
+if has_oozie_server:
+  oozie_hdfs_user_dir = format("/user/{oozie_user}")
+  oozie_hdfs_user_mode = 775
+if has_hcat_server_host:
+  hcat_hdfs_user_dir = format("/user/{hcat_user}")
+  hcat_hdfs_user_mode = 755
+  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+  webhcat_hdfs_user_mode = 755
+if has_hive_server_host:
+  hive_hdfs_user_dir = format("/user/{hive_user}")
+  hive_hdfs_user_mode = 700
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 770
+
+namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
+
+# if stack_version[0] == "2":
+fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
+# else:
+#   fs_checkpoint_dir = default("/configurations/core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
+
+# if stack_version[0] == "2":
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
+# else:
+#   dfs_data_dir = default('/configurations/hdfs-site/dfs.data.dir',"/tmp/hadoop-hdfs/dfs/data")
+
+# HDFS High Availability properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+namenode_id = None
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namemodes_ids_list:
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
+if journalnode_address:
+  journalnode_port = journalnode_address.split(":")[1]
+
+falcon_store_uri = default('/configurations/global/falcon_store_uri', None)
\ No newline at end of file
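
The HA block above derives everything from hdfs-site: dfs.nameservices names the logical nameservice, dfs.ha.namenodes.<ns> lists the NameNode ids, and each id maps to a host via dfs.namenode.rpc-address.<ns>.<id>; namenode_id ends up set only on a host that appears in one of those addresses. A standalone re-statement of that lookup, with entirely hypothetical property values:

    # hypothetical hdfs-site fragment for a two-NameNode nameservice
    hdfs_site = {
      'dfs.nameservices': 'ns1',
      'dfs.ha.namenodes.ns1': 'nn1,nn2',
      'dfs.namenode.rpc-address.ns1.nn1': 'c6401.ambari.apache.org:8020',
      'dfs.namenode.rpc-address.ns1.nn2': 'c6402.ambari.apache.org:8020',
    }
    hostname = 'c6402.ambari.apache.org'   # made-up current host

    ns = hdfs_site['dfs.nameservices']
    namenode_id = None
    for nn_id in hdfs_site['dfs.ha.namenodes.' + ns].split(','):
      if hostname in hdfs_site['dfs.namenode.rpc-address.%s.%s' % (ns, nn_id)]:
        namenode_id = nn_id
    # on this host the loop leaves namenode_id == 'nn2'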

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..d27b13a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,107 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = get_unique_id_and_date()
+    dir = '/tmp'
+    tmp_file = format("{dir}/{unique}")
+
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
+    test_dir_exists = format("hadoop fs -test -e {dir}")
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup put below to handle retries; if retrying there wil be a stale file
+    #that needs cleanup; exit code is fn of second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
+    if params.security_enabled:
+      Execute(format(
+        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "{smoke_user}'"))
+    ExecuteHadoop(safemode_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      smoke_test_user = params.smoke_user
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format(
+        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName))
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+    if params.is_namenode_master:
+      if params.has_zkfc_hosts:
+        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+        check_zkfc_process_cmd = format(
+          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+        Execute(check_zkfc_process_cmd,
+                logoutput=True,
+                try_sleep=3,
+                tries=5
+        )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()

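The checkWebUI.py helper staged by the service check above ships as a separate static file that is not part of this hunk. As a rough illustration of the kind of probe it performs (hit each JournalNode's HTTP port and fail if any does not answer), one could write something like the sketch below; this is a simplified stand-in, not the actual checkWebUI.py, and the hosts and port in the comment are hypothetical:

    import sys
    import urllib2  # the package scripts in this patch target Python 2

    def check_web_uis(hosts, port, timeout=10):
      """Return the hosts whose web UI did not answer within the timeout."""
      failed = []
      for host in hosts:
        try:
          urllib2.urlopen("http://%s:%s" % (host, port), timeout=timeout)
        except Exception:
          failed.append(host)
      return failed

    if __name__ == "__main__":
      # Hypothetical invocation: python check_web_uis.py jn1.example.com,jn2.example.com 8480
      hosts, port = sys.argv[1].split(","), sys.argv[2]
      sys.exit(1 if check_web_uis(hosts, port) else 0)
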
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..8f682ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_snamenode import snamenode
+
+
+class SNameNode(Script):
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.config(env)
+    snamenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
new file mode 100644
index 0000000..4097373
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/status_params.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['global']['hdfs_user']
+hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

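The pid files defined above are what check_process_status() is pointed at from the status() methods of the component scripts. As a rough, simplified illustration of what such a pid-file liveness check boils down to (this is not the resource_management implementation, and the sample path is hypothetical):

    import os

    def process_alive(pid_file):
      """Return True if pid_file exists and names a currently running process."""
      try:
        with open(pid_file) as f:
          pid = int(f.read().strip())
      except (IOError, ValueError):
        return False
      try:
        os.kill(pid, 0)  # signal 0 checks existence/permission without killing
      except OSError:
        return False
      return True

    # e.g. process_alive('/var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid')
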
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
new file mode 100644
index 0000000..225cd2e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/utils.py
@@ -0,0 +1,138 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def service(action=None, name=None, user=None, create_pid_dir=False,
+            create_log_dir=False, keytab=None, principal=None):
+  import params
+
+  kinit_cmd = "true"
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+  hadoop_daemon = format(
+    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  if create_pid_dir:
+    Directory(pid_dir,
+              owner=user,
+              recursive=True)
+  if create_log_dir:
+    Directory(log_dir,
+              owner=user,
+              recursive=True)
+
+  if params.security_enabled:
+    principal_replaced = principal.replace("_HOST", params.hostname)
+    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
+
+    if name == "datanode":
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+  daemon_cmd = format("{cmd} {action} {name}")
+
+  service_is_up = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
+
+  Execute(kinit_cmd)
+  Execute(daemon_cmd,
+          user = user,
+          not_if=service_is_up
+  )
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+         ignore_failures=True
+    )
+
+
+def hdfs_directory(name=None, owner=None, group=None,
+                   mode=None, recursive_chown=False, recursive_chmod=False):
+  import params
+
+  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
+  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
+
+  stub_dir = params.namenode_dirs_created_stub_dir
+  stub_filename = params.namenode_dirs_stub_filename
+  dir_absent_in_stub = format(
+    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
+  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
+  tries = 3
+  try_sleep = 10
+  dfs_check_nn_status_cmd = "true"
+
+  if params.dfs_ha_enabled:
+    namenode_id = params.namenode_id
+    dfs_check_nn_status_cmd = format(
+      "hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null")
+
+  #if params.stack_version[0] == "2":
+  mkdir_cmd = format("fs -mkdir -p {name}")
+  #else:
+  #  mkdir_cmd = format("fs -mkdir {name}")
+
+  if params.security_enabled:
+    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
+            user = params.hdfs_user)
+  ExecuteHadoop(mkdir_cmd,
+                try_sleep=try_sleep,
+                tries=tries,
+                not_if=format(
+                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "{dir_exists} && ! {namenode_safe_mode_off}"),
+                only_if=format(
+                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "! {dir_exists}"),
+                conf_dir=params.hadoop_conf_dir,
+                user=params.hdfs_user
+  )
+  Execute(record_dir_in_stub,
+          user=params.hdfs_user,
+          only_if=format("{dir_absent_in_stub}")
+  )
+
+  recursive = "-R" if recursive_chown else ""
+  perm_cmds = []
+
+  if owner:
+    chown = owner
+    if group:
+      chown = format("{owner}:{group}")
+    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
+  if mode:
+    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
+  for cmd in perm_cmds:
+    ExecuteHadoop(cmd,
+                  user=params.hdfs_user,
+                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
+                  try_sleep=try_sleep,
+                  tries=tries,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+
+

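To make the intended use of hdfs_directory() above easier to follow, here is a hedged usage sketch of how a component script in this package might call it when provisioning HDFS directories; the directory names, owners and modes are illustrative, not taken from this patch:

    # Illustrative usage only; runs inside the Ambari agent environment where the
    # params module and Kerberos settings referenced by utils.py are available.
    from utils import hdfs_directory

    def create_app_dirs():
      hdfs_directory(name="/tmp",
                     owner="hdfs",
                     mode="777")
      hdfs_directory(name="/user/ambari-qa",
                     owner="ambari-qa",
                     mode="770",
                     recursive_chown=True,
                     recursive_chmod=True)
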
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
new file mode 100644
index 0000000..1f9ba65
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/scripts/zkfc_slave.py
@@ -0,0 +1,62 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class ZkfcSlave(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    service(
+      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    service(
+      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
+      create_log_dir=True
+    )
+
+  def config(self, env):
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.zkfc_pid_file)
+
+
+if __name__ == "__main__":
+  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

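The template above renders one excluded host per line into the HDFS exclude file. A small sketch of rendering it outside Ambari with plain Jinja2, using made-up hosts:

    from jinja2 import Template

    TEMPLATE = "\n".join([
      "{% for host in hdfs_exclude_file %}",
      "{{host}}",
      "{% endfor %}",
    ])

    rendered = Template(TEMPLATE, trim_blocks=True).render(
      hdfs_exclude_file=["worker3.example.com", "worker7.example.com"])  # hypothetical hosts
    print(rendered)  # prints one host per line
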
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
new file mode 100644
index 0000000..b3ed5f5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/configuration/hive-site.xml
@@ -0,0 +1,267 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+Licensed to the Apache Software Foundation (ASF) under one or more
+contributor license agreements. See the NOTICE file distributed with
+this work for additional information regarding copyright ownership.
+The ASF licenses this file to You under the Apache License, Version 2.0
+(the "License"); you may not use this file except in compliance with
+the License. You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>ambari.hive.db.schema.name</name>
+    <value>hive</value>
+    <description>Database name used by the Hive Metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionURL</name>
+    <value>jdbc</value>
+    <description>JDBC connect string for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionDriverName</name>
+    <value>com.mysql.jdbc.Driver</value>
+    <description>Driver class name for a JDBC metastore</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionUserName</name>
+    <value>hive</value>
+    <description>username to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>javax.jdo.option.ConnectionPassword</name>
+    <value> </value>
+    <description>password to use against metastore database</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.warehouse.dir</name>
+    <value>/apps/hive/warehouse</value>
+    <description>location of default database for the warehouse</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.sasl.enabled</name>
+    <value></value>
+    <description>If true, the metastore thrift interface will be secured with SASL.
+     Clients must authenticate with Kerberos.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.keytab.file</name>
+    <value></value>
+    <description>The path to the Kerberos Keytab file containing the metastore
+     thrift server's service principal.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.kerberos.principal</name>
+    <value></value>
+    <description>The service principal for the metastore thrift server. The special
+    string _HOST will be replaced automatically with the correct host name.</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.cache.pinobjtypes</name>
+    <value>Table,Database,Type,FieldSchema,Order</value>
+    <description>List of comma separated metastore object types that should be pinned in the cache</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.uris</name>
+    <value>thrift://localhost:9083</value>
+    <description>URI for client to contact metastore server</description>
+  </property>
+
+  <property>
+    <name>hive.semantic.analyzer.factory.impl</name>
+    <value>org.apache.hcatalog.cli.HCatSemanticAnalyzerFactory</value>
+    <description>Controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.client.socket.timeout</name>
+    <value>60</value>
+    <description>MetaStore Client socket timeout in seconds</description>
+  </property>
+
+  <property>
+    <name>hive.metastore.execute.setugi</name>
+    <value>true</value>
+    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.enabled</name>
+    <value>false</value>
+    <description>enable or disable the hive client authorization</description>
+  </property>
+
+  <property>
+    <name>hive.security.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>the hive client authorization manager class name.
+    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.metastore.authorization.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
+    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.security.authenticator.manager</name>
+    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
+    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
+  </property>
+
+  <property>
+    <name>hive.server2.enable.doAs</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.hdfs.impl.disable.cache</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.file.impl.disable.cache</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.enforce.bucketing</name>
+    <value>true</value>
+    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.enforce.sorting</name>
+    <value>true</value>
+    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
+  </property>
+
+  <property>
+    <name>hive.map.aggr</name>
+    <value>true</value>
+    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.mapred.reduce.tasks.speculative.execution</name>
+    <value>false</value>
+    <description>Whether speculative execution for reducers should be turned on.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common
+      join into a mapjoin based on the input file size.</description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join</name>
+    <value>true</value>
+    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
+      the criteria for a sort-merge join.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask</name>
+    <value>true</value>
+    <description>Whether Hive enables the optimization of converting a common join into a mapjoin based on the input file
+      size. If this parameter is on, and the sum of the sizes of n-1 of the tables/partitions of an n-way join is smaller than the
+      specified size, the join is directly converted to a mapjoin (there is no conditional task).
+    </description>
+  </property>
+
+  <property>
+    <name>hive.auto.convert.join.noconditionaltask.size</name>
+    <value>1000000000</value>
+    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
+      is on, and the sum of the sizes of n-1 of the tables/partitions of an n-way join is smaller than this size, the join is directly
+      converted to a mapjoin (there is no conditional task). The default is 10MB.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication.min.reducer</name>
+    <value>1</value>
+    <description>Reduce deduplication merges two RSs by moving the key/parts/reducer-num of the child RS to the parent RS.
+      That means that if the reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can result in a very slow, single-MR job.
+      The optimization will be disabled if the number of reducers is less than the specified value.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.optimize.mapjoin.mapreduce</name>
+    <value>true</value>
+    <description>If hive.auto.convert.join is off, this parameter does not take
+      effect. If it is on, and if there are map-join jobs followed by a map-reduce
+      job (e.g. a group by), each map-only job is merged with the following
+      map-reduce job.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.mapjoin.bucket.cache.size</name>
+    <value>10000</value>
+    <description>
+      How many values in each key of the map-joined table should be cached
+      in memory.
+    </description>
+  </property>
+
+  <property>
+    <name>hive.vectorized.execution.enabled</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>hive.optimize.reducededuplication</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>hive.optimize.index.filter</name>
+    <value>true</value>
+    <description>
+    Whether to enable automatic use of indexes
+    </description>
+  </property>
+
+</configuration>

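As a worked illustration of the rule stated in the hive.auto.convert.join.noconditionaltask.size description above (an n-way join becomes an unconditional map-join when the sum of the sizes of the n-1 smaller inputs fits under the threshold), here is a small, hypothetical helper; it only mirrors the rule as described and is not Hive code:

    def converts_to_mapjoin(table_sizes_bytes, threshold_bytes=1000000000):
      """True when the n-1 smallest join inputs together fit under the threshold,
      so the largest side can be streamed and the rest held in memory."""
      sizes = sorted(table_sizes_bytes)
      return sum(sizes[:-1]) <= threshold_bytes

    # Hypothetical 3-way join: 200 MB and 500 MB small sides against the 1 GB value set above.
    print(converts_to_mapjoin([200 * 1024 ** 2, 500 * 1024 ** 2, 50 * 1024 ** 3]))  # True
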
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
new file mode 100644
index 0000000..da752c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml
@@ -0,0 +1,156 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HIVE</name>
+      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
+      <version>0.12.0.2.1.1</version>
+      <components>
+
+        <component>
+          <name>HIVE_METASTORE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hive_metastore.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HIVE_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hive_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MYSQL_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/mysql_server.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HIVE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hive_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-server</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>mysql-client</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>hive-site</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>HCATALOG</name>
+      <comment>This is the comment for the HCATALOG service</comment>
+      <version>0.12.0.2.0.6.0</version>
+      <components>
+        <component>
+          <name>HCAT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hcat_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+    </service>
+
+  </services>
+</metainfo>

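For readers who want to inspect a stack definition such as the metainfo above programmatically, a short sketch using only the Python standard library; the local path is hypothetical:

    import xml.etree.ElementTree as ET

    # Hypothetical checkout-relative path to the file shown above.
    tree = ET.parse("ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/metainfo.xml")

    for service in tree.getroot().findall("./services/service"):
      print(service.findtext("name"), service.findtext("version"))
      for component in service.findall("./components/component"):
        print("  %-16s %-7s %s" % (component.findtext("name"),
                                   component.findtext("category"),
                                   component.findtext("commandScript/script")))
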
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
new file mode 100644
index 0000000..8d31b91
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+mysqldbhost=$4
+myhostname=$(hostname -f)
+
+service $mysqldservice start
+echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
+if [ -z "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep "$mysqldbuser")" ]; then
+  echo "Adding user $mysqldbuser@$myhostname";
+  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
+  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
+fi
+mysql -u root -e "flush privileges;"
+service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
new file mode 100644
index 0000000..9e7b33f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
new file mode 100644
index 0000000..7e03524
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..051a21e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e '!run $2' 2>&1| awk '{print}'|grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh
new file mode 100644
index 0000000..fa90c2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startHiveserver2.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
+echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh
new file mode 100644
index 0000000..9350776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
+echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py
new file mode 100644
index 0000000..2993d3a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hcat():
+  import params
+
+  Directory(params.hcat_conf_dir,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            recursive=True
+  )
+
+  hcat_TemplateConfig('hcat-env.sh')
+
+
+def hcat_TemplateConfig(name):
+  import params
+
+  TemplateConfig(format("{hcat_conf_dir}/{name}"),
+                 owner=params.hcat_user,
+                 group=params.user_group
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py
new file mode 100644
index 0000000..8b5921a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+
+class HCatClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+
+    hcat()
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HCatClient().execute()


[14/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/repos/repoinfo.xml
new file mode 100644
index 0000000..70debfc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/repos/repoinfo.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<reposinfo>
+  <os type="centos6">
+    <repo>
+      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="centos5">
+    <repo>
+      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="redhat6">
+    <repo>
+      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="redhat5">
+    <repo>
+      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="oraclelinux6">
+    <repo>
+      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="oraclelinux5">
+    <repo>
+      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="suse11">
+    <repo>
+      <baseurl>REPLACE_WITH_SUSE11_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+  <os type="sles11">
+    <repo>
+      <baseurl>REPLACE_WITH_SUSE11_URL</baseurl>
+      <repoid>HDP-2.1.1</repoid>
+      <reponame>HDP</reponame>
+    </repo>
+  </os>
+</reposinfo>

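The baseurl entries above are placeholders that are substituted at release time. A hedged sketch of reading the os-to-repository mapping and flagging any placeholder that was left unreplaced; the local path is hypothetical:

    import xml.etree.ElementTree as ET

    # Hypothetical checkout-relative path to the file shown above.
    root = ET.parse("ambari-server/src/main/resources/stacks/HDP/2.1.1/repos/repoinfo.xml").getroot()

    for os_elem in root.findall("os"):
      for repo in os_elem.findall("repo"):
        baseurl = repo.findtext("baseurl")
        flag = "  <-- placeholder, replace before use" if baseurl.startswith("REPLACE_WITH_") else ""
        print("%-14s %-10s %s%s" % (os_elem.get("type"), repo.findtext("repoid"), baseurl, flag))
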
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.1.1/role_command_order.json
new file mode 100644
index 0000000..416945d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/role_command_order.json
@@ -0,0 +1,107 @@
+{
+  "_comment" : "Record format:",
+  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
+  "general_deps" : {
+    "_comment" : "dependencies for all cases",
+    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
+        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
+    "NIMBUS-START" : ["ZOOKEEPER_SERVER-START"],
+    "SUPERVISOR-START" : ["NIMBUS-START"],
+    "STORM_UI_SERVER-START" : ["NIMBUS-START"],
+    "DRPC_SERVER-START" : ["NIMBUS-START"],
+    "LOGVIEWER_SERVER-START" : ["NIMBUS-START"],
+    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
+    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
+    "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
+    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
+    "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
+    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
+    "FLUME_SERVER-START": ["OOZIE_SERVER-START"],
+    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
+        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
+        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
+        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
+        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
+        "WEBHCAT_SERVER-START", "FLUME_SERVER-START"],
+    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
+    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
+    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
+    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
+    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
+    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
+    "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START"],
+    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
+    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
+    "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP", "LOGVIEWER_SERVER-STOP"],
+    "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
+    "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
+    "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
+    "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
+    "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
+    "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
+    "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
+    "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
+    "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
+    "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
+    "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
+    "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
+    "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
+    "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
+    "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
+    "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
+    "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
+    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
+    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
+    "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
+  },
+  "_comment" : "GLUSTERFS-specific dependencies",
+  "optional_glusterfs": {
+    "HBASE_MASTER-START": ["PEERSTATUS-START"],
+    "JOBTRACKER-START": ["PEERSTATUS-START"],
+    "TASKTRACKER-START": ["PEERSTATUS-START"],
+    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
+    "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
+  "optional_no_glusterfs": {
+    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
+    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
+    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
+    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
+    "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
+    "HIVE_SERVER-START": ["DATANODE-START"],
+    "WEBHCAT_SERVER-START": ["DATANODE-START"],
+    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
+        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
+        "SECONDARY_NAMENODE-START"],
+    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
+        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
+    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
+    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
+    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
+    "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
+        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
+    "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
+    "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
+    "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
+    "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
+  },
+  "_comment" : "Dependencies that are used in HA NameNode cluster",
+  "optional_ha": {
+    "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
+    "ZKFC-START": ["NAMENODE-START"],
+    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
+    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
+  }
+}
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/global.xml
new file mode 100644
index 0000000..1d56238
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/global.xml
@@ -0,0 +1,42 @@
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>falcon_user</name>
+    <value>falcon</value>
+    <description>Falcon user.</description>
+  </property>
+  <property>
+    <name>falcon_port</name>
+    <value>15000</value>
+    <description>Falcon server port.</description>
+  </property>
+  <property>
+    <name>falcon_local_dir</name>
+    <value>/hadoop/falcon</value>
+    <description>Local directory where Falcon stores its data.</description>
+  </property>
+  <property>
+    <name>falcon_store_uri</name>
+    <value>file:///hadoop/falcon/store</value>
+    <description>Falcon store URI.</description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/oozie-site.xml
new file mode 100644
index 0000000..186677b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/configuration/oozie-site.xml
@@ -0,0 +1,145 @@
+<?xml version="1.0"?>
+<!--
+  Licensed to the Apache Software Foundation (ASF) under one
+  or more contributor license agreements.  See the NOTICE file
+  distributed with this work for additional information
+  regarding copyright ownership.  The ASF licenses this file
+  to you under the Apache License, Version 2.0 (the
+  "License"); you may not use this file except in compliance
+  with the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+-->
+
+<configuration>
+  <property>
+    <name>oozie.service.ProxyUserService.proxyuser.falcon.hosts</name>
+    <value>*</value>
+    <description>Falcon proxyuser hosts</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ProxyUserService.proxyuser.falcon.groups</name>
+    <value>*</value>
+    <description>Falcon proxyuser groups</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
+    <value>
+      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo
+    </value>
+    <description>Falcon</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
+    <value>
+      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
+      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
+      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
+      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
+      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
+      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
+      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
+      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+      user=org.apache.oozie.coord.CoordELFunctions#coord_user
+    </value>
+    <description>Falcon</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ELService.ext.functions.coord-action-create</name>
+    <value>
+      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
+      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
+      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
+      user=org.apache.oozie.coord.CoordELFunctions#coord_user
+    </value>
+    <description>Falcon</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
+    <value>
+      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
+      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
+      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
+      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
+      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
+      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
+      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
+      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
+      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
+      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
+      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
+      user=org.apache.oozie.coord.CoordELFunctions#coord_user
+    </value>
+    <description>Falcon</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ELService.ext.functions.coord-action-start</name>
+    <value>
+      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
+      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
+      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
+      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
+      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
+      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
+      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
+      latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
+      future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
+      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
+      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
+      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
+      formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
+      user=org.apache.oozie.coord.CoordELFunctions#coord_user
+    </value>
+    <description>Falcon</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
+    <value>
+      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
+      user=org.apache.oozie.coord.CoordELFunctions#coord_user
+    </value>
+    <description>Falcon</description>
+  </property>
+
+  <property>
+    <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
+    <value>
+      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
+      user=org.apache.oozie.coord.CoordELFunctions#coord_user
+    </value>
+    <description>Falcon</description>
+  </property>
+
+
+</configuration>
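
(Illustration, not part of this patch.) Each of these property values is a comma-separated list of name=fully.qualified.Class#method entries that Oozie resolves to EL extension functions. A minimal Python sketch of how that format breaks apart; parse_el_functions is a placeholder name, not Oozie's own parser.

# Two entries copied from the values above, in the same format.
raw = """
  now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
  formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo
"""

def parse_el_functions(value):
    mapping = {}
    for entry in value.split(","):
        entry = entry.strip()
        if not entry:
            continue
        name, target = entry.split("=", 1)
        clazz, method = target.split("#", 1)
        mapping[name.strip()] = (clazz.strip(), method.strip())
    return mapping

print(parse_el_functions(raw)["now"])
# ('org.apache.oozie.extensions.OozieELExtensions', 'ph1_now_echo')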

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/metainfo.xml
new file mode 100644
index 0000000..7587a8b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/metainfo.xml
@@ -0,0 +1,72 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>FALCON</name>
+      <comment>Data management and processing platform</comment>
+      <version>0.4.0.2.1.1</version>
+      <components>
+        <component>
+          <name>FALCON_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/falcon_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+        <component>
+          <name>FALCON_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/falcon_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <!--TODO: uncomment this once the package is available in the repo-->
+      <!--<osSpecifics>-->
+        <!--<osSpecific>-->
+          <!--<osType>any</osType>-->
+          <!--<packages>-->
+            <!--<package>-->
+              <!--<type>rpm</type>-->
+              <!--<name>falcon-0.4.0.2.0.6.0-76</name>-->
+            <!--</package>-->
+          <!--</packages>-->
+        <!--</osSpecific>-->
+      <!--</osSpecifics>-->
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>oozie-site</config-type>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon.py
new file mode 100644
index 0000000..d6c3d1d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon.py
@@ -0,0 +1,66 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def falcon(type, action = None):
+  import params
+
+  #TODO: remove once the package is available in the repo
+  Execute("cd /tmp; rm -f falcon-0.4.0.2.0.6.0-76.el6.noarch.rpm; "
+          "wget http://public-repo-1.hortonworks.com/HDP-LABS/Projects/Falcon/2.0.6.0-76/rpm/falcon-0.4.0.2.0.6.0-76.el6.noarch.rpm; "
+          "rpm -Uvh --nodeps falcon-0.4.0.2.0.6.0-76.el6.noarch.rpm",
+          not_if='yum list installed | grep falcon'
+  )
+
+  if type == 'client':
+    if action == 'config':
+      File(params.falcon_conf_dir + '/client.properties',
+           content=Template('client.properties.j2'),
+           mode=0644)
+  elif type == 'server':
+    if action == 'config':
+      Directory(params.falcon_local_dir,
+                owner=params.falcon_user,
+                recursive=True
+      )
+      Directory(params.falcon_data_dir,
+                owner=params.falcon_user,
+                recursive=True
+      )
+      File(params.falcon_conf_dir + '/runtime.properties',
+           content=Template('runtime.properties.j2'),
+           mode=0644
+      )
+      File(params.falcon_conf_dir + '/startup.properties',
+           content=Template('startup.properties.j2'),
+           mode=0644
+      )
+    if action == 'start':
+      Execute(format('env JAVA_HOME={java_home} FALCON_LOG_DIR=/var/log/falcon '
+                     'FALCON_PID_DIR=/var/run/falcon FALCON_DATA_DIR={falcon_data_dir} '
+                     '{falcon_home}/bin/falcon-start -port {falcon_port}'),
+              user=params.falcon_user
+      )
+    if action == 'stop':
+      Execute(format('env JAVA_HOME={java_home} FALCON_LOG_DIR=/var/log/falcon '
+                     'FALCON_PID_DIR=/var/run/falcon FALCON_DATA_DIR={falcon_data_dir} '
+                     '{falcon_home}/bin/falcon-stop'),
+              user=params.falcon_user
+      )
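
(Illustration, not part of this patch.) The first Execute above only runs its download command when the not_if guard fails, which is what keeps the temporary RPM install idempotent. A rough Python sketch of that guard semantics, assuming nothing about resource_management's actual implementation; execute below is a hypothetical helper.

import subprocess

def execute(command, not_if=None):
    # Skip the command entirely when the guard command exits 0 (already done).
    if not_if and subprocess.call(not_if, shell=True) == 0:
        return
    subprocess.check_call(command, shell=True)

# execute("wget <rpm url> && rpm -Uvh --nodeps falcon-*.rpm",
#         not_if="yum list installed | grep falcon")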

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_client.py
new file mode 100644
index 0000000..ab59b6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_client.py
@@ -0,0 +1,40 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from falcon import falcon
+
+class FalconClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def config(self, env):
+    import params
+
+    falcon('client', action='config')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  FalconClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_server.py
new file mode 100644
index 0000000..9415694
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/falcon_server.py
@@ -0,0 +1,61 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from falcon import falcon
+
+class FalconServer(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+    falcon('server', action='start')
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    falcon('server', action='stop')
+
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+
+    falcon('server', action='config')
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.server_pid_file)
+
+
+if __name__ == "__main__":
+  FalconServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/params.py
new file mode 100644
index 0000000..7f27862
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/params.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+oozie_user = config['configurations']['global']['oozie_user']
+falcon_user = config['configurations']['global']['falcon_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+
+java_home = config['hostLevelParams']['java_home']
+falcon_home = '/usr/lib/falcon'
+falcon_conf_dir = '/etc/falcon/conf'
+falcon_local_dir = config['configurations']['global']['falcon_local_dir']
+falcon_log_dir = '/var/log/falcon'
+falcon_data_dir = format('{falcon_local_dir}/activemq')
+store_uri = config['configurations']['global']['falcon_store_uri']
+falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
+falcon_port = config['configurations']['global']['falcon_port']
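
(Illustration, not part of this patch.) params.py indexes into the command JSON that the Ambari agent hands to the script. A hand-written, hypothetical fragment of that structure showing only the keys the lookups above assume; the real payload carries many more sections and site-specific values.

# Hypothetical command JSON fragment; all values here are made up.
config = {
    "configurations": {
        "global": {
            "oozie_user": "oozie",
            "falcon_user": "falcon",
            "smokeuser": "ambari-qa",
            "falcon_local_dir": "/hadoop/falcon",
            "falcon_store_uri": "file:///hadoop/falcon/store",
            "falcon_port": "15000",
        }
    },
    "hostLevelParams": {"java_home": "/usr/jdk64/jdk1.7.0_45"},
    "clusterHostInfo": {"falcon_server_hosts": ["c6401.ambari.apache.org"]},
}

falcon_local_dir = config["configurations"]["global"]["falcon_local_dir"]
# Mirrors format('{falcon_local_dir}/activemq') from the script above.
falcon_data_dir = "%s/activemq" % falcon_local_dir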

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/service_check.py
new file mode 100644
index 0000000..19fbaf4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/service_check.py
@@ -0,0 +1,38 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class FalconServiceCheck(Script):
+
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+
+    Execute(format("env JAVA_HOME={java_home} FALCON_LOG_DIR=/var/log/falcon "
+                   "FALCON_PID_DIR=/var/run/falcon FALCON_DATA_DIR={falcon_data_dir} "
+                   "{falcon_home}/bin/falcon admin -version"),
+            user=params.smoke_user,
+            logoutput=True
+    )
+
+if __name__ == "__main__":
+  FalconServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/status_params.py
new file mode 100644
index 0000000..395766c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/scripts/status_params.py
@@ -0,0 +1,24 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+falcon_pid_dir = '/var/run/falcon'
+server_pid_file = format('{falcon_pid_dir}/falcon.pid')
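
(Illustration, not part of this patch.) check_process_status(pid_file) is expected to read a pid file and verify that the recorded process is still alive. A rough sketch of that idea only, not the resource_management implementation; check_pid_file and the exception class are placeholders.

import os

class ComponentIsNotRunning(Exception):
    pass

def check_pid_file(pid_file):
    if not os.path.isfile(pid_file):
        raise ComponentIsNotRunning("pid file %s not found" % pid_file)
    with open(pid_file) as f:
        pid = int(f.read().strip())
    try:
        os.kill(pid, 0)          # signal 0: existence check only, no signal sent
    except OSError:
        raise ComponentIsNotRunning("process %d is not running" % pid)

# check_pid_file('/var/run/falcon/falcon.pid')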

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/client.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/client.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/client.properties.j2
new file mode 100644
index 0000000..6ffc110
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/client.properties.j2
@@ -0,0 +1,24 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#########################################################################
+##########    This is used for falcon packaging only. ###################
+## Uses default port. Please change if configured for non-default port ##
+#########################################################################
+
+falcon.url=http://{{falcon_host}}:{{falcon_port}}/
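
(Illustration, not part of this patch.) The {{falcon_host}} and {{falcon_port}} placeholders are filled from params.py when the File resource renders this Jinja2 template. A standalone sketch using the jinja2 library directly, with hypothetical values; resource_management's Template resolves the names from params.py rather than from explicit keyword arguments.

from jinja2 import Template

template = Template("falcon.url=http://{{falcon_host}}:{{falcon_port}}/\n")
print(template.render(falcon_host="c6401.ambari.apache.org", falcon_port=15000))
# falcon.url=http://c6401.ambari.apache.org:15000/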

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/runtime.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/runtime.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/runtime.properties.j2
new file mode 100644
index 0000000..677e5e3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/runtime.properties.j2
@@ -0,0 +1,33 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+####################################################
+####    This is used for falcon packaging only. ####
+####################################################
+
+*.domain=${falcon.app.type}
+
+*.log.cleanup.frequency.minutes.retention=hours(6)
+*.log.cleanup.frequency.hours.retention=minutes(1)
+*.log.cleanup.frequency.days.retention=days(7)
+*.log.cleanup.frequency.months.retention=months(3)
+
+#### To configure falcon servers with prism ####
+#*.all.colos=<comma separated list of colos where falcon servers are installed>
+#*.falcon.<colo>.endpoint=<falcon server endpoint>
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/startup.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/startup.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/startup.properties.j2
new file mode 100644
index 0000000..ade21c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/FALCON/package/templates/startup.properties.j2
@@ -0,0 +1,70 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+####################################################
+####    This is used for falcon packaging only. ####
+####################################################
+
+*.domain=${falcon.app.type}
+
+######### Implementation classes #########
+## DONT MODIFY UNLESS SURE ABOUT CHANGE ##
+*.workflow.engine.impl=org.apache.falcon.workflow.engine.OozieWorkflowEngine
+*.oozie.process.workflow.builder=org.apache.falcon.workflow.OozieProcessWorkflowBuilder
+*.oozie.feed.workflow.builder=org.apache.falcon.workflow.OozieFeedWorkflowBuilder
+*.journal.impl=org.apache.falcon.transaction.SharedFileSystemJournal
+*.SchedulableEntityManager.impl=org.apache.falcon.resource.SchedulableEntityManager
+*.ConfigSyncService.impl=org.apache.falcon.resource.ConfigSyncService
+*.ProcessInstanceManager.impl=org.apache.falcon.resource.InstanceManager
+*.catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
+
+*.application.services=org.apache.falcon.entity.store.ConfigurationStore,\
+                        org.apache.falcon.service.ProcessSubscriberService,\
+                        org.apache.falcon.rerun.service.RetryService,\
+						org.apache.falcon.rerun.service.LateRunService,\
+						org.apache.falcon.service.SLAMonitoringService,\
+						org.apache.falcon.service.LogCleanupService
+prism.application.services=org.apache.falcon.entity.store.ConfigurationStore
+*.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
+                        org.apache.falcon.entity.ColoClusterRelation,\
+                        org.apache.falcon.group.FeedGroupMap,\
+                        org.apache.falcon.service.SharedLibraryHostingService
+prism.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
+                        org.apache.falcon.entity.ColoClusterRelation,\
+                        org.apache.falcon.group.FeedGroupMap
+*.broker.impl.class=org.apache.activemq.ActiveMQConnectionFactory
+*.shared.libs=activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms,s4fs-0.1.jar
+
+######### Implementation classes #########
+
+*.config.store.uri={{store_uri}}
+*.system.lib.location=${falcon.home}/server/webapp/falcon/WEB-INF/lib
+prism.system.lib.location=${falcon.home}/server/webapp/prism/WEB-INF/lib
+*.broker.url=tcp://localhost:61616
+*.retry.recorder.path=${falcon.log.dir}/retry
+
+*.falcon.cleanup.service.frequency=days(1)
+
+#default time-to-live for a JMS message 3 days (time in minutes)
+*.broker.ttlInMins=4320
+*.entity.topic=FALCON.ENTITY.TOPIC
+*.max.retry.failure.count=1
+
+######### Properties for configuring iMon client and metric #########
+*.internal.queue.size=1000
+*.current.colo=default

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml
new file mode 100644
index 0000000..ad17c06
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/metainfo.xml
@@ -0,0 +1,101 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>GANGLIA</name>
+      <comment>Ganglia Metrics Collection system</comment>
+      <version>3.5.0</version>
+      <components>
+        <component>
+          <name>GANGLIA_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/ganglia_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>GANGLIA_MONITOR</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/ganglia_monitor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>libganglia-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmetad-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-web-3.5.7-99.noarch</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>python-rrdtool.x86_64</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ganglia-gmond-modules-python-3.5.0-99</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <package>
+            <type>rpm</type>
+            <name>apache2</name>
+          </package>
+          <package>
+            <type>rpm</type>
+            <name>apache2-mod_php5</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos6</osType>
+          <package>
+            <type>rpm</type>
+            <name>httpd</name>
+          </package>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh
new file mode 100644
index 0000000..e60eb31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmetad.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh
new file mode 100644
index 0000000..0cec8dc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkGmond.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh
new file mode 100644
index 0000000..d94db5d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/checkRrdcached.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init
new file mode 100644
index 0000000..20b388e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetad.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmetad startup script
+# processname: hdp-gmetad
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh
new file mode 100644
index 0000000..e28610e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmetadLib.sh
@@ -0,0 +1,204 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port addreses2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed. 
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value is less
+# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+# New Default RRA
+# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
+# Two weeks of data points at 1 minute resolution (average)
+#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
+# Retaining existing resolution
+RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+     "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a
+# case-sensitive manner.
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init
new file mode 100644
index 0000000..afb7026
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmond.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmond startup script
+# processname: hdp-gmond
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|reload|status}"
+	exit 1
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh
new file mode 100644
index 0000000..87da4dd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/gmondLib.sh
@@ -0,0 +1,545 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
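+        # Echo the PID only if that process is still alive; the fake "MYPID" column header
+        # keeps the ps output parseable, and grep -v strips it when no process matches.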
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host total memory every
+   180 secs.
+   This information doesn't change between reboots and is only collected
+   once. It is needed for the heatmap display. */
+ collection_group {
+   collect_once = yes
+   time_threshold = 180
+   metric {
+    name = "mem_total"
+    title = "Memory Total"
+   }
+ }
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+udp_recv_channel {
+    port = 0
+}
+
+
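+/* Pull in the per-cluster master/slave channel definitions generated into this
+   cluster's conf.d directory by the companion generateGmond*Conf functions. */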
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py
new file mode 100644
index 0000000..3fe6901
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrd.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import cgi
+import os
+import rrdtool
+import sys
+import time
+import re
+import urlparse
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end,
+                resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+
+  args = [file, cf]
+
+  if start is not None:
+    args.extend(["-s", start])
+
+  if end is not None:
+    args.extend(["-e", end])
+
+  if resolution is not None:
+    args.extend(["-r", resolution])
+
+  rrdMetric = rrdtool.fetch(args)
+  # ds_name
+  sys.stdout.write(rrdMetric[1][0])
+  sys.stdout.write("\n")
+
+  sys.stdout.write(clusterName)
+  sys.stdout.write("\n")
+  sys.stdout.write(hostName)
+  sys.stdout.write("\n")
+  sys.stdout.write(metricName)
+  sys.stdout.write("\n")
+
+  # write time
+  sys.stdout.write(str(rrdMetric[0][0]))
+  sys.stdout.write("\n")
+  # write step
+  sys.stdout.write(str(rrdMetric[0][2]))
+  sys.stdout.write("\n")
+
+  if not pointInTime:
+    valueCount = 0
+    lastValue = None
+
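+    # Run-length encode the series: each new value is printed once and, when a run of
+    # identical samples ends, "[~r]<run length>" follows it; missing samples print "[~n]".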
+    for tuple in rrdMetric[2]:
+
+      thisValue = tuple[0]
+
+      if valueCount > 0 and thisValue == lastValue:
+        valueCount += 1
+      else:
+        if valueCount > 1:
+          sys.stdout.write("[~r]")
+          sys.stdout.write(str(valueCount))
+          sys.stdout.write("\n")
+
+        if thisValue is None:
+          sys.stdout.write("[~n]\n")
+        else:
+          sys.stdout.write(str(thisValue))
+          sys.stdout.write("\n")
+
+        valueCount = 1
+        lastValue = thisValue
+  else:
+    value = None
+    idx = -1
+    tuple = rrdMetric[2]
+    tupleLastIdx = len(tuple) * -1
+
+    while value is None and idx >= tupleLastIdx:
+      value = tuple[idx][0]
+      idx -= 1
+
+    if value is not None:
+      sys.stdout.write(str(value))
+      sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+  return
+
+
+def stripList(l):
+  return ([x.strip() for x in l])
+
+
+sys.stdout.write("Content-type: text/plain\n\n")
+
+# write start time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+requestMethod = os.environ['REQUEST_METHOD']
+
+if requestMethod == 'POST':
+  postData = sys.stdin.readline()
+  queryString = cgi.parse_qs(postData)
+  queryString = dict((k, v[0]) for k, v in queryString.items())
+elif requestMethod == 'GET':
+  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "/var/lib/ganglia/rrds/"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+if "pt" in queryString:
+  pointInTime = True
+else:
+  pointInTime = False
+
+
+def _walk(*args, **kwargs):
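+  # Same contract as os.walk, but also descends into directories that are symlinks,
+  # which os.walk skips by default.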
+  for root, dirs, files in os.walk(*args, **kwargs):
+    for dir in dirs:
+      qualified_dir = os.path.join(root, dir)
+      if os.path.islink(qualified_dir):
+        for x in os.walk(qualified_dir, **kwargs):
+          yield x
+    yield (root, dirs, files)
+
+
+for cluster in clusterParts:
+  for path, dirs, files in _walk(rrdPath + cluster):
+    pathParts = path.split("/")
+    # Process only paths that contain files. If no host parameter was passed, process
+    # every host folder plus the summary info; otherwise process only that host's folder.
+    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
+      for metric in metricParts:
+        file = metric + ".rrd"
+        fileFullPath = os.path.join(path, file)
+        if os.path.exists(fileFullPath):
+          #Exact name of metric
+          printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                      os.path.join(path, file), cf, start, end, resolution,
+                      pointInTime)
+        else:
+          #Regex as metric name
+          metricRegex = metric + '\.rrd$'
+          p = re.compile(metricRegex)
+          matchedFiles = filter(p.match, files)
+          for matchedFile in matchedFiles:
+            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
+                        os.path.join(path, matchedFile), cf, start, end,
+                        resolution, pointInTime)
+
+sys.stdout.write("[~EOF]\n")
+# write end time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+sys.stdout.flush()


[02/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py
new file mode 100644
index 0000000..dac198a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/files/validateYarnComponentStatus.py
@@ -0,0 +1,165 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import subprocess
+import json
+
+RESOURCEMANAGER = 'rm'
+NODEMANAGER = 'nm'
+HISTORYSERVER = 'hs'
+
+STARTED_STATE = 'STARTED'
+RUNNING_STATE = 'RUNNING'
+
+# Return the response for the given path and address
+def getResponse(path, address, ssl_enabled):
+
+  command = "curl"
+  httpGssnegotiate = "--negotiate"
+  userpswd = "-u:"
+  insecure = "-k"# This is smoke test, no need to check CA of server
+  if ssl_enabled:
+    url = 'https://' + address + path
+  else:
+    url = 'http://' + address + path
+      
+  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
+  try:
+    proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+    (stdout, stderr) = proc.communicate()
+    response = json.loads(stdout)
+    if response == None:
+      print 'There is no response for url: ' + str(url)
+      exit(1)
+    return response
+  except Exception as e:
+    print 'Error getting response for url:' + str(url), e
+    exit(1)
+
+#Verify that REST api is available for given component
+def validateAvailability(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAvailabilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking availability status of component', e
+    exit(1)
+
+#Validate component-specific response
+def validateAvailabilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      rm_state = response['clusterInfo']['state']
+      if rm_state == STARTED_STATE:
+        return True
+      else:
+        print 'ResourceManager is not started'
+        return False
+
+    elif component == NODEMANAGER:
+      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
+      if node_healthy:
+        return True
+      else:
+        return False
+    elif component == HISTORYSERVER:
+      hs_start_time = response['historyInfo']['startedOn']
+      if hs_start_time > 0:
+        return True
+      else:
+        return False
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating availability response for ' + str(component), e
+    return False
+
+#Verify that component has required resources to work
+def validateAbility(component, path, address, ssl_enabled):
+
+  try:
+    response = getResponse(path, address, ssl_enabled)
+    is_valid = validateAbilityResponse(component, response)
+    if not is_valid:
+      exit(1)
+  except Exception as e:
+    print 'Error checking ability of component', e
+    exit(1)
+
+# Validate that the component-specific response shows the resources required to work
+def validateAbilityResponse(component, response):
+  try:
+    if component == RESOURCEMANAGER:
+      nodes = []
+      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
+        nodes = response['nodes']['node']
+      connected_nodes_count = len(nodes)
+      if connected_nodes_count == 0:
+        print 'There are no NodeManagers connected to the ResourceManager'
+        return False
+      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
+      active_nodes_count = len(active_nodes)
+
+      if active_nodes_count == 0:
+        print 'There are no active NodeManagers connected to the ResourceManager'
+        return False
+      else:
+        return True
+    else:
+      return False
+  except Exception as e:
+    print 'Error validating ability response', e
+    return False
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
+  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
+
+  (options, args) = parser.parse_args()
+
+  component = args[0]
+  
+  address = options.address
+  # Interpret the flag as a boolean: only the literal string "true" (case-insensitive) enables SSL.
+  ssl_enabled = (str(options.ssl_enabled).lower() == 'true')
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/info'
+  elif component == NODEMANAGER:
+    path = '/ws/v1/node/info'
+  elif component == HISTORYSERVER:
+    path = '/ws/v1/history/info'
+  else:
+    parser.error("Invalid component")
+
+  validateAvailability(component, path, address, ssl_enabled)
+
+  if component == RESOURCEMANAGER:
+    path = '/ws/v1/cluster/nodes'
+    validateAbility(component, path, address, ssl_enabled)
+
+if __name__ == "__main__":
+  main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py
new file mode 100644
index 0000000..9b6003c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/historyserver.py
@@ -0,0 +1,60 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Historyserver(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('historyserver',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('historyserver',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.historyserver_pid_file)
+
+if __name__ == "__main__":
+  Historyserver().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py
new file mode 100644
index 0000000..3b789f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapred_service_check.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class MapReduce2ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
+    input_file = format("/user/{smokeuser}/mapredsmokeinput")
+    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
+
+    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
+    create_file_cmd = format("fs -put /etc/passwd {input_file}")
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
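+    # Smoke-test flow: remove leftovers from a previous run, upload a small input file,
+    # run the bundled wordcount example, then verify that the output path exists.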
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+    ExecuteHadoop(cleanup_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(create_file_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir,
+                  logoutput=True
+    )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+if __name__ == "__main__":
+  MapReduce2ServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py
new file mode 100644
index 0000000..54119a7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/mapreduce2_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class MapReduce2Client(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  MapReduce2Client().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py
new file mode 100644
index 0000000..dbeaca0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/nodemanager.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+class Nodemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('nodemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('nodemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nodemanager_pid_file)
+
+if __name__ == "__main__":
+  Nodemanager().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py
new file mode 100644
index 0000000..f1b22bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/params.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+config_dir = "/etc/hadoop/conf"
+
+mapred_user = status_params.mapred_user
+yarn_user = status_params.yarn_user
+hdfs_user = config['configurations']['global']['hdfs_user']
+
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+rm_host = config['clusterHostInfo']['rm_host'][0]
+rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
+rm_https_port = "8090"
+
+java64_home = config['hostLevelParams']['java_home']
+hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
+
+hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
+hadoop_yarn_home = '/usr/lib/hadoop-yarn'
+yarn_heapsize = config['configurations']['global']['yarn_heapsize']
+resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
+nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']
+
+yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
+yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
+mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
+mapred_log_dir_prefix = config['configurations']['global']['mapred_log_dir_prefix']
+
+rm_webui_address = format("{rm_host}:{rm_port}")
+rm_webui_https_address = format("{rm_host}:{rm_https_port}")
+nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
+hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
+
+nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
+nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
+
+
+hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
+distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
+hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
+
+yarn_pid_dir = status_params.yarn_pid_dir
+mapred_pid_dir = status_params.mapred_pid_dir
+
+mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
+yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
+mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
+yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
+
+mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
+yarn_bin = "/usr/lib/hadoop-yarn/sbin"
+
+user_group = config['configurations']['global']['user_group']
+limits_conf_dir = "/etc/security/limits.d"
+hadoop_conf_dir = "/etc/hadoop/conf"
+yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
+
+#exclude file
+exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
+exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py
new file mode 100644
index 0000000..0540670
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/resourcemanager.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+from service import service
+
+
+class Resourcemanager(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+
+    env.set_params(params)
+    yarn()
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('resourcemanager',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    service('resourcemanager',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.resourcemanager_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+
+    yarn_user = params.yarn_user
+    conf_dir = params.config_dir
+    user_group = params.user_group
+
+    yarn_refresh_cmd = format("/usr/bin/yarn --config {conf_dir} rmadmin -refreshNodes")
+
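+    # Rewrite the excludes file from the decommissioned-hosts template, then ask the
+    # ResourceManager to re-read it so the listed NodeManagers are decommissioned.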
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=yarn_user,
+         group=user_group
+    )
+
+    Execute(yarn_refresh_cmd,
+            user=yarn_user
+    )
+    pass
+
+
+if __name__ == "__main__":
+  Resourcemanager().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py
new file mode 100644
index 0000000..441ef6c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service.py
@@ -0,0 +1,65 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(
+    name,
+    action='start'):
+
+  import params
+
+  if (name == 'historyserver'):
+    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
+    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{name}.pid")
+    usr = params.mapred_user
+  else:
+    daemon = format("{yarn_bin}/yarn-daemon.sh")
+    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{name}.pid")
+    usr = params.yarn_user
+
+  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd} start {name}")
+    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
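+    # not_if skips the start when the pid file already points at a live process; the
+    # follow-up Execute waits 5s and fails the task if the daemon did not stay up.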
+    Execute(daemon_cmd,
+            user=usr,
+            not_if=no_op
+    )
+
+    Execute(no_op,
+            user=usr,
+            not_if=no_op,
+            initial_wait=5
+    )
+
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {name}")
+    Execute(daemon_cmd,
+            user=usr,
+    )
+    rm_pid = format("rm -f {pid_file}")
+    Execute(rm_pid,
+            user=usr
+    )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py
new file mode 100644
index 0000000..c53cc78
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/service_check.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    run_yarn_check_cmd = "/usr/bin/yarn node -list"
+
+    component_type = 'rm'
+    if params.hadoop_ssl_enabled:
+      component_address = params.rm_webui_https_address
+    else:
+      component_address = params.rm_webui_address
+
+    validateStatusFileName = "validateYarnComponentStatus.py"
+    validateStatusFilePath = format("/tmp/{validateStatusFileName}")
+
+    validateStatusCmd = format("{validateStatusFilePath} {component_type} -p {component_address} -s {hadoop_ssl_enabled}")
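+    # Illustrative expansion (hypothetical host): /tmp/validateYarnComponentStatus.py rm -p rm.example.com:8088 -s False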
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
+    else:
+      smoke_cmd = validateStatusCmd
+
+    File(validateStatusFilePath,
+         content=StaticFile(validateStatusFileName),
+         mode=0755
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            user=params.smokeuser,
+            logoutput=True
+    )
+
+    Execute(run_yarn_check_cmd,
+                  user=params.smokeuser
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py
new file mode 100644
index 0000000..e554513
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/status_params.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+mapred_user = config['configurations']['global']['mapred_user']
+yarn_user = config['configurations']['global']['yarn_user']
+yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
+mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
+yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
+mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
+
+resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
+nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
+historyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py
new file mode 100644
index 0000000..1d97373
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn.py
@@ -0,0 +1,126 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def yarn():
+  import params
+
+  Directory([params.yarn_pid_dir, params.yarn_log_dir],
+            owner=params.yarn_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory([params.mapred_pid_dir, params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory([params.nm_local_dirs, params.nm_log_dirs, params.yarn_log_dir_prefix],
+            owner=params.yarn_user,
+            recursive=True
+  )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("mapred-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['mapred-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("yarn-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['yarn-site'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  XmlConfig("capacity-scheduler.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['capacity-scheduler'],
+            owner=params.yarn_user,
+            group=params.user_group,
+            mode=0644
+  )
+
+  File(params.yarn_job_summary_log,
+       owner=params.yarn_user,
+       group=params.user_group
+  )
+
+  File(params.mapred_job_summary_log,
+       owner=params.mapred_user,
+       group=params.user_group
+  )
+
+  File(format("{limits_conf_dir}/yarn.conf"),
+       mode=0644,
+       content=Template('yarn.conf.j2')
+  )
+
+  File(format("{limits_conf_dir}/mapreduce.conf"),
+       mode=0644,
+       content=Template('mapreduce.conf.j2')
+  )
+
+  File(format("{config_dir}/yarn-env.sh"),
+       owner=params.yarn_user,
+       group=params.user_group,
+       mode=0755,
+       content=Template('yarn-env.sh.j2')
+  )
+
+  File(format("{config_dir}/hadoop-env.sh"),
+       owner=params.hdfs_user,
+       group=params.user_group,
+       mode=0755,
+       content=StaticFile(format('{hadoop_conf_dir}/hadoop-env.sh'))
+  )
+
+  if params.security_enabled:
+    container_executor = format("{yarn_container_bin}/container-executor")
+    File(container_executor,
+         group=params.yarn_executor_container_group,
+         mode=06050
+    )
+    
+    File(format("{config_dir}/container-executor.cfg"),
+         group=params.user_group,
+         mode=0644,
+         content=Template('container-executor.cfg.j2')
+    )
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py
new file mode 100644
index 0000000..7e9c564
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/scripts/yarn_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from yarn import yarn
+
+class YarnClient(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    yarn()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  YarnClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2
new file mode 100644
index 0000000..29ad949
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/container-executor.cfg.j2
@@ -0,0 +1,22 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+yarn.nodemanager.local-dirs={{nm_local_dirs}}
+yarn.nodemanager.log-dirs={{nm_log_dirs}}
+yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
+banned.users=hdfs,yarn,mapred,bin
+min.user.id=1000

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..4a4c698
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2
new file mode 100644
index 0000000..76caea4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/mapreduce.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{mapred_user}}   - nofile 32768
+{{mapred_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2
new file mode 100644
index 0000000..70bb71a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn-env.sh.j2
@@ -0,0 +1,119 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+export HADOOP_YARN_HOME={{hadoop_yarn_home}}
+export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
+export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+export JAVA_HOME={{java64_home}}
+
+# User for YARN daemons
+export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
+
+# resolve links - $0 may be a softlink
+export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
+
+# some Java parameters
+# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
+if [ "$JAVA_HOME" != "" ]; then
+  #echo "run java in $JAVA_HOME"
+  JAVA_HOME=$JAVA_HOME
+fi
+
+if [ "$JAVA_HOME" = "" ]; then
+  echo "Error: JAVA_HOME is not set."
+  exit 1
+fi
+
+JAVA=$JAVA_HOME/bin/java
+JAVA_HEAP_MAX=-Xmx1000m
+
+# For setting YARN specific HEAP sizes please use this
+# Parameter and set appropriately
+YARN_HEAPSIZE={{yarn_heapsize}}
+
+# check envvars which might override default args
+if [ "$YARN_HEAPSIZE" != "" ]; then
+  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
+fi
+
+# Resource Manager specific parameters
+
+# Specify the max Heapsize for the ResourceManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_RESOURCEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the ResourceManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_RESOURCEMANAGER_OPTS=
+
+# Node Manager specific parameters
+
+# Specify the max Heapsize for the NodeManager using a numerical value
+# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
+# the value to 1000.
+# This value will be overridden by an Xmx setting specified in either YARN_OPTS
+# and/or YARN_NODEMANAGER_OPTS.
+# If not specified, the default value will be picked from either YARN_HEAPMAX
+# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
+export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
+
+# Specify the JVM options to be used when starting the NodeManager.
+# These options will be appended to the options specified as YARN_OPTS
+# and therefore may override any similar flags set in YARN_OPTS
+#export YARN_NODEMANAGER_OPTS=
+
+# so that filenames w/ spaces are handled correctly in loops below
+IFS=
+
+
+# default log directory & file
+if [ "$YARN_LOG_DIR" = "" ]; then
+  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
+fi
+if [ "$YARN_LOGFILE" = "" ]; then
+  YARN_LOGFILE='yarn.log'
+fi
+
+# default policy file for service-level authorization
+if [ "$YARN_POLICYFILE" = "" ]; then
+  YARN_POLICYFILE="hadoop-policy.xml"
+fi
+
+# restore ordinary behaviour
+unset IFS
+
+
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
+YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
+YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
+YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
+YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
+if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
+  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
+fi
+YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2
new file mode 100644
index 0000000..be89b07
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/package/templates/yarn.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{yarn_user}}   - nofile 32768
+{{yarn_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml
new file mode 100644
index 0000000..f78df89
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/configuration/global.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>zk_user</name>
+    <value>zookeeper</value>
+    <description>ZooKeeper User.</description>
+  </property>
+  <property>
+    <name>zookeeperserver_host</name>
+    <value></value>
+    <description>ZooKeeper Server Hosts.</description>
+  </property>
+  <property>
+    <name>zk_data_dir</name>
+    <value>/hadoop/zookeeper</value>
+    <description>Data directory for ZooKeeper.</description>
+  </property>
+  <property>
+    <name>zk_log_dir</name>
+    <value>/var/log/zookeeper</value>
+    <description>ZooKeeper Log Dir</description>
+  </property>
+  <property>
+    <name>zk_pid_dir</name>
+    <value>/var/run/zookeeper</value>
+    <description>ZooKeeper Pid Dir</description>
+  </property>
+  <property>
+    <name>zk_pid_file</name>
+    <value>/var/run/zookeeper/zookeeper_server.pid</value>
+    <description>ZooKeeper Pid File</description>
+  </property>
+  <property>
+    <name>tickTime</name>
+    <value>2000</value>
+    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
+  </property>
+  <property>
+    <name>initLimit</name>
+    <value>10</value>
+    <description>Ticks to allow for sync at Init.</description>
+  </property>
+  <property>
+    <name>syncLimit</name>
+    <value>5</value>
+    <description>Ticks to allow for sync at Runtime.</description>
+  </property>
+  <property>
+    <name>clientPort</name>
+    <value>2181</value>
+    <description>Port for running ZK Server.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml
new file mode 100644
index 0000000..af5b8cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/metainfo.xml
@@ -0,0 +1,70 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>ZOOKEEPER</name>
+      <comment>Centralized service which provides highly reliable distributed coordination</comment>
+      <version>3.4.5.2.1.1</version>
+      <components>
+
+        <component>
+          <name>ZOOKEEPER_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/zookeeper_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZOOKEEPER_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/zookeeper_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>zookeeper</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh
new file mode 100644
index 0000000..07017e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh
new file mode 100644
index 0000000..49ceb4d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkServer.sh
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+    JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ]
+then
+    echo "JMX enabled by default"
+    # for some reason these two options are necessary on jdk6 on Ubuntu
+    #   according to the docs they are not necessary, but otherwise jconsole cannot
+    #   do a local attach
+    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+else
+    echo "JMX disabled by user request"
+    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+# Only follow symlinks if readlink supports it
+if readlink -f "$0" > /dev/null 2>&1
+then
+  ZOOBIN=`readlink -f "$0"`
+else
+  ZOOBIN="$0"
+fi
+ZOOBINDIR=`dirname "$ZOOBIN"`
+
+. "$ZOOBINDIR"/zkEnv.sh
+
+if [ "x$2" != "x" ]
+then
+    ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+if $cygwin
+then
+    ZOOCFG=`cygpath -wp "$ZOOCFG"`
+    # cygwin has a "kill" in the shell itself, gets confused
+    KILL=/bin/kill
+else
+    KILL=kill
+fi
+
+echo "Using config: $ZOOCFG"
+
+ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
+
+
+case $1 in
+start)
+    echo  "Starting zookeeper ... "
+    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
+    /bin/echo -n $! > "$ZOOPIDFILE"
+    echo STARTED
+    ;;
+stop)
+    echo "Stopping zookeeper ... "
+    if [ ! -f "$ZOOPIDFILE" ]
+    then
+    echo "error: could not find file $ZOOPIDFILE"
+    exit 1
+    else
+    $KILL -9 $(cat "$ZOOPIDFILE")
+    rm "$ZOOPIDFILE"
+    echo STOPPED
+    fi
+    ;;
+upgrade)
+    shift
+    echo "upgrading the servers to 3.*"
+    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+    echo "Upgrading ... "
+    ;;
+restart)
+    shift
+    "$0" stop ${@}
+    sleep 3
+    "$0" start ${@}
+    ;;
+status)
+    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
+    if [ "x$STAT" = "x" ]
+    then
+        echo "Error contacting service. It is probably not running."
+    else
+        echo $STAT
+    fi
+    ;;
+*)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+
+esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh
new file mode 100644
index 0000000..32dfce4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkService.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zkcli_script=$1
+user=$2
+conf_dir=$3
+su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh
new file mode 100644
index 0000000..c1c11b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/files/zkSmoke.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smoke_script=$1
+smoke_user=$2
+conf_dir=$3
+client_port=$4
+security_enabled=$5
+kinit_path_local=$6
+smoke_user_keytab=$7
+export ZOOKEEPER_EXIT_CODE=0
+test_output_file=/tmp/zkSmoke.out
+errors_expr="ERROR|Exception"
+acceptable_expr="SecurityException"
+zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
+zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
+echo "zk_node1=$zk_node1"
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
+  su - $smoke_user -c "$kinitcmd"
+fi
+
+function verify_output() {
+  if [ -f $test_output_file ]; then
+    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
+    if [ "$?" -eq 0 ]; then
+      echo "Error found in the zookeeper smoke test. Exiting."
+      echo $errors
+      exit 1
+    fi
+  fi
+}
+
+# Delete /zk_smoketest znode if exists
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
+# Create /zk_smoketest znode on one zookeeper server
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
+verify_output
+
+for i in $zkhosts ; do
+  echo "Running test on host $i"
+  # Verify the data associated with znode across all the nodes in the zookeeper quorum
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
+  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
+  echo $output | grep smoke_data
+  if [[ $? -ne 0 ]] ; then
+    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
+    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
+  fi
+done
+
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
+if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
+  echo "Zookeeper Smoke Test: Failed" 
+else
+   echo "Zookeeper Smoke Test: Passed" 
+fi
+exit $ZOOKEEPER_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py
new file mode 100644
index 0000000..9acc0c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/params.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+config_dir = "/etc/zookeeper/conf"
+zk_user =  config['configurations']['global']['zk_user']
+hostname = config['hostname']
+zk_bin = '/usr/lib/zookeeper/bin'
+user_group = config['configurations']['global']['user_group']
+
+smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+
+zk_log_dir = config['configurations']['global']['zk_log_dir']
+zk_data_dir = config['configurations']['global']['zk_data_dir']
+zk_pid_dir = status_params.zk_pid_dir
+zk_pid_file = status_params.zk_pid_file
+zk_server_heapsize = "-Xmx1024m"
+
+tickTime = config['configurations']['global']['tickTime']
+initLimit = config['configurations']['global']['initLimit']
+syncLimit = config['configurations']['global']['syncLimit']
+clientPort = config['configurations']['global']['clientPort']
+
+if 'zoo.cfg' in config['configurations']:
+  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+  zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zk_primary_name = "zookeeper"
+zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
+zk_principal = zk_principal_name.replace('_HOST',hostname)
+
+java64_home = config['hostLevelParams']['java_home']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+keytab_path = "/etc/security/keytabs"
+zk_keytab_path = format("{keytab_path}/zk.service.keytab")
+zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
+zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
+security_enabled = config['configurations']['global']['security_enabled']
+
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py
new file mode 100644
index 0000000..6b3553d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/service_check.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ZookeeperServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File("/tmp/zkSmoke.sh",
+         mode=0755,
+         content=StaticFile('zkSmoke.sh')
+    )
+
+    cmd_quorum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
+                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
+                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd_quorum,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+if __name__ == "__main__":
+  ZookeeperServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py
new file mode 100644
index 0000000..98f2903
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+zk_pid_dir = config['configurations']['global']['zk_pid_dir']
+zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py
new file mode 100644
index 0000000..c49eb22
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def zookeeper(type = None):
+  import params
+
+  Directory(params.config_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  configFile("zoo.cfg", template_name="zoo.cfg.j2")
+  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
+  configFile("configuration.xsl", template_name="configuration.xsl.j2")
+
+  Directory(params.zk_pid_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_log_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_data_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  if type == 'server':
+    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
+
+    File(format("{zk_data_dir}/myid"),
+         mode = 0644,
+         content = myid
+    )
+
+  configFile("log4j.properties", template_name="log4j.properties.j2")
+
+  if params.security_enabled:
+    if type == "server":
+      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+    else:
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+
+  File(format("{config_dir}/zoo_sample.cfg"),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+def configFile(name, template_name=None):
+  import params
+
+  File(format("{config_dir}/{name}"),
+       content=Template(template_name),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
new file mode 100644
index 0000000..028a37d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+
+class ZookeeperClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    zookeeper(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
new file mode 100644
index 0000000..e8cc264
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+from zookeeper_service import zookeeper_service
+
+class ZookeeperServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    zookeeper(type='server')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    zookeeper_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    zookeeper_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.zk_pid_file)
+
+if __name__ == "__main__":
+  ZookeeperServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py
new file mode 100644
index 0000000..83b8f08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/scripts/zookeeper_service.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def zookeeper_service(action='start'):
+  import params
+
+  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
+
+  if action == 'start':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
+    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.zk_user
+    )
+  elif action == 'stop':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
+    rm_pid = format("rm -f {zk_pid_file}")
+    Execute(daemon_cmd,
+            user=params.zk_user
+    )
+    Execute(rm_pid)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
new file mode 100644
index 0000000..c003ba2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/ZOOKEEPER/package/templates/configuration.xsl.j2
@@ -0,0 +1,24 @@
+<?xml version="1.0"?>
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+  <tr>
+     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+     <td><xsl:value-of select="value"/></td>
+     <td><xsl:value-of select="description"/></td>
+  </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>


[08/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..5112e99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hcat_service_check():
+    import params
+
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File('/tmp/hcatSmoke.sh',
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir)
+
+    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True
+    )
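
Note: ExecuteHadoop effectively wraps its command in the hadoop CLI with the configured --config directory, so the middle step is an HDFS existence check for the directory the prepare phase created. Roughly, with a made-up unique id (the real one comes from get_unique_id_and_date()):

    # Illustrative expansion of the three smoke-test phases
    unique = "20140117_0001"
    prepare_cmd = "sh /tmp/hcatSmoke.sh hcatsmoke{0} prepare".format(unique)
    test_cmd    = "hadoop --config /etc/hadoop/conf fs -test -e /apps/hive/warehouse/hcatsmoke{0}".format(unique)
    cleanup_cmd = "sh /tmp/hcatSmoke.sh hcatsmoke{0} cleanup".format(unique)
    # prepare/cleanup run as the smoke user, the fs -test check as the hdfs user;
    # a non-zero exit from any phase fails the HCat service check.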

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000..b37ebb2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hive(name=None):
+  import params
+
+  if name == 'metastore' or name == 'hiveserver2':
+    hive_config_dir = params.hive_server_conf_dir
+    config_file_mode = 0600
+    jdbc_connector()
+  else:
+    hive_config_dir = params.hive_conf_dir
+    config_file_mode = 0644
+
+  Directory(hive_config_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=config_file_mode
+  )
+
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar_name} ]"))
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=StaticFile('startHiveserver2.sh')
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+  )
+
+  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
+  crt_file(format("{hive_conf_dir}/hive-exec-log4j.properties.template"))
+  crt_file(format("{hive_conf_dir}/hive-log4j.properties.template"))
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    cmd = format(
+      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..0a5fb2b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..c741174
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Verify the Hive Metastore process is running by checking its pid file
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000..3ad81a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='hiveserver2')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'hiveserver2',
+                  action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'hiveserver2',
+                  action = 'stop'
+    )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    # Verify the HiveServer2 process is running by checking its pid file
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py
new file mode 100644
index 0000000..e8d4e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hive_service(
+    name,
+    action='start'):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format(
+      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format(
+      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd}")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            user=params.hive_user,
+            not_if=no_op_test
+    )
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
+
+  elif action == 'stop':
+    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
+    Execute(demon_cmd)
+
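
Note: when the metastore is backed by MySQL or Oracle, the start path above also shells out to Ambari's DBConnectionVerification helper before the start is considered successful. After substitution the check is roughly the line built below; the JDK path, JDBC URL and credentials are placeholders, while the jar locations match params.py:

    # Placeholder values standing in for params.py / hive-site.xml entries
    java64_home = "/usr/jdk64/jdk1.6.0_31"
    check_db_connection_jar = "/usr/lib/ambari-agent/DBConnectionVerification.jar"
    jdbc_jar_name = "mysql-connector-java.jar"

    db_connection_check_command = (
        "{0}/bin/java -cp {1}:/usr/share/java/{2} "
        "org.apache.ambari.server.DBConnectionVerification "
        "jdbc:mysql://db.example.com/hive hive <password> com.mysql.jdbc.Driver"
    ).format(java64_home, check_db_connection_jar, jdbc_jar_name)
    # A non-zero exit from this command makes the start action fail.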

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000..a45d310
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from mysql_service import mysql_service
+
+class MysqlServer(Script):
+
+  if System.get_instance().platform == "suse":
+    daemon_name = 'mysql'
+  else:
+    daemon_name = 'mysqld'
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action='start')
+
+    File(params.mysql_adduser_path,
+         mode=0755,
+         content=StaticFile('addMysqlUser.sh')
+    )
+
+    # Autoescaping
+    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
+           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd) , params.mysql_host[0])
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+    mysql_service(daemon_name=self.daemon_name, action='stop')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'stop')
+
+  def status(self, env):
+    mysql_service(daemon_name=self.daemon_name, action = 'status')
+
+if __name__ == "__main__":
+  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py
new file mode 100644
index 0000000..cfb3e08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'):
+  cmd = format('service {daemon_name} {action}')
+
+  if action == 'status':
+    logoutput = False
+  else:
+    logoutput = True
+
+  Execute(cmd,
+          path="/usr/local/bin/:/bin/:/sbin/",
+          tries=1,
+          logoutput=logoutput)
+
+
+
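
Note: mysql_service is a thin wrapper over the distro init script, so the status action just runs "service <daemon> status" and relies on its exit code; Execute raises on a non-zero exit, which the agent reads as the component being down. A rough standalone equivalent:

    import subprocess

    def mysql_service(daemon_name="mysqld", action="start"):
        # Equivalent of Execute(format('service {daemon_name} {action}'), tries=1)
        cmd = ["service", daemon_name, action]
        rc = subprocess.call(cmd)
        if rc != 0:
            # For action='status' this is how a stopped daemon is reported back
            raise RuntimeError("%s returned %d" % (" ".join(cmd), rc))

    # e.g. mysql_service("mysql", "status") on SUSE, mysql_service("mysqld", "status") elsewhere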

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000..0cf89be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/params.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_server_conf_dir = "/etc/hive/conf.server"
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+#users
+hive_user = config['configurations']['global']['hive_user']
+hive_lib = '/usr/lib/hive/lib/'
+#JDBC driver jar name
+hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
+if hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc6.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+#common
+hive_metastore_port = config['configurations']['global']['hive_metastore_port']
+hive_var_lib = '/var/lib/hive'
+hive_server_host = config['clusterHostInfo']['hive_server_host']
+hive_url = format("jdbc:hive2://{hive_server_host}:10000")
+
+smokeuser = config['configurations']['global']['smokeuser']
+smoke_test_sql = "/tmp/hiveserver2.sql"
+smoke_test_path = "/tmp/hiveserver2Smoke.sh"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+#hive_env
+hive_conf_dir = "/etc/hive/conf"
+hive_dbroot = config['configurations']['global']['hive_dbroot']
+hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#hive-site
+hive_database_name = config['configurations']['global']['hive_database_name']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh'
+
+hadoop_home = '/usr'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user =  config['configurations']['global']['hdfs_user']
+user_group = config['configurations']['global']['user_group']
+artifact_dir = "/tmp/HDP-artifacts/"
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+
+start_hiveserver2_path = "/tmp/start_hiveserver2_script"
+start_metastore_path = "/tmp/start_metastore_script"
+
+hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+java64_home = config['hostLevelParams']['java_home']
+
+##### MYSQL
+
+db_name = config['configurations']['global']['hive_database_name']
+mysql_user = "mysql"
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = "/tmp/addMysqlUser.sh"
+
+########## HCAT
+
+hcat_conf_dir = '/etc/hcatalog/conf'
+
+metastore_port = 9933
+hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['global']['hcat_user']
+webhcat_user = config['configurations']['global']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
+
+hadoop_conf_dir = '/etc/hadoop/conf'
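
Note: this module is deliberately just flat assignments; the component scripts do "import params", pass it to env.set_params(), and the format() helper then resolves {name} placeholders against these attributes (and the caller's locals). The gist of that lookup, reduced to standard-library pieces:

    import string

    class ParamsFormatter(string.Formatter):
        # Resolve {name} placeholders from a dict, similar in spirit to
        # resource_management's format(), which also consults the caller's scope.
        def __init__(self, namespace):
            self.namespace = namespace
        def get_value(self, key, args, kwargs):
            return self.namespace[key]

    demo = {"hive_pid_dir": "/var/run/hive", "hive_metastore_pid": "hive.pid"}
    print(ParamsFormatter(demo).vformat("{hive_pid_dir}/{hive_metastore_pid}", (), {}))
    # -> /var/run/hive/hive.pid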

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000..111e8a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+from hcat_service_check import hcat_service_check
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
+      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
+      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
+    else:
+      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
+
+    File(params.smoke_test_path,
+         content=StaticFile('hiveserver2Smoke.sh'),
+         mode=0755
+    )
+
+    File(params.smoke_test_sql,
+         content=StaticFile('hiveserver2.sql')
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True,
+            user=params.smokeuser)
+
+    hcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()
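
Note: with security disabled, the command that finally runs (as the smoke user, up to three tries) is just the wrapper script pointed at HiveServer2; for example, with a made-up host and JDK path:

    # Illustrative expansion of smoke_cmd when security_enabled is False
    smoke_cmd = ("env JAVA_HOME=/usr/jdk64/jdk1.6.0_31 "
                 "/tmp/hiveserver2Smoke.sh jdbc:hive2://hs2.example.com:10000 /tmp/hiveserver2.sql")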

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py
new file mode 100644
index 0000000..7770975
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2
new file mode 100644
index 0000000..2a35240
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hcat-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{hcat_user}}
+METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2
new file mode 100644
index 0000000..548262a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HIVE/package/templates/hive-env.sh.j2
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}
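
Note: the {{...}} placeholders are Jinja2 expressions; the Template resource in hive.py renders this file with the params attributes (plus the extra conf_dir argument) as context. Outside the agent the rendering can be approximated with plain jinja2, using made-up values:

    # Requires the jinja2 package; values are illustrative only
    from jinja2 import Template

    snippet = Template(
        'export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"\n'
        'export HIVE_CONF_DIR={{conf_dir}}\n'
        'export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}\n')
    print(snippet.render(hadoop_heapsize="1024",
                         conf_dir="/etc/hive/conf.server",
                         hive_aux_jars_path="/usr/lib/hcatalog/share/hcatalog"))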

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml
new file mode 100644
index 0000000..9af461e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/metainfo.xml
@@ -0,0 +1,105 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>NAGIOS</name>
+      <comment>Nagios Monitoring and Alerting system</comment>
+      <version>3.5.0</version>
+      <components>
+        <component>
+            <name>NAGIOS_SERVER</name>
+            <category>MASTER</category>
+            <commandScript>
+              <script>scripts/nagios_server.py</script>
+              <scriptType>PYTHON</scriptType>
+              <timeout>600</timeout>
+            </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>perl</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>perl-Net-SNMP</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-plugins-1.4.9</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-www-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>nagios-devel-3.5.0-99</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>fping</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hdp_mon_nagios_addons</name>
+            </package>
+          </packages>
+        </osSpecific>
+        <osSpecific>
+          <osType>suse</osType>
+          <package>
+            <type>rpm</type>
+            <name>php5-json</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>centos5</osType>
+          <package>
+            <type>rpm</type>
+            <name>php-pecl-json.x86_64</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>redhat5</osType>
+          <package>
+            <type>rpm</type>
+            <name>php-pecl-json.x86_64</name>
+          </package>
+        </osSpecific>
+        <osSpecific>
+          <osType>oraclelinux5</osType>
+          <package>
+            <type>rpm</type>
+            <name>php-pecl-json.x86_64</name>
+          </package>
+        </osSpecific>
+      </osSpecifics>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php
new file mode 100644
index 0000000..f4063fb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_aggregate.php
@@ -0,0 +1,243 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+    echo "Service description not provided -n option\n";
+    exit(3);
+  }
+  if ($type == "service") {
+    $service_name=$options['n'];
+    /* echo "DESC: " . $service_name . "\n"; */
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts;
+  if ($type == "service") {
+    $counts=query_alert_count($status_file_content, $service_name, $status_code);
+  } else {
+    $counts=query_host_count($status_file_content, $status_code);
+  }
+
+  if ($counts['total'] == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($counts['actual']/$counts['total'])*100;
+  }
+  if ($percent >= $crit) {
+    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (1);
+  }
+  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+  exit(0);
+
+
+  # Functions
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content, $status_code) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $total_hosts = 0;
+    $hosts = 0;
+    foreach ($matches[0] as $object) {
+      $total_hosts++;
+      if (getParameter($object, "current_state") == $status_code) {
+        $hosts++;
+      }
+    }
+    $hostcounts_object['total'] = $total_hosts;
+    $hostcounts_object['actual'] = $hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Alert counts */
+  function query_alert_count ($status_file_content, $service_name, $status_code) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $alertcounts_objects = array ();
+    $total_alerts=0;
+    $alerts=0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == $service_name) {
+        $total_alerts++;
+        if (getParameter($object, "current_state") >= $status_code) {
+          $alerts++;
+        }
+      }
+    }
+    $alertcounts_objects['total'] = $total_alerts;
+    $alertcounts_objects['actual'] = $alerts;
+    return $alertcounts_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break;
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break;
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break;
+      case "SYSTEM":
+      case "HDFS":
+      case "MAPREDUCE":
+      case "HBASE":
+        break;
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+
+/* JSON document format */
+/*
+{
+  "programstatus":{
+    "last_command_check":"1327385743"
+  },
+  "hostcounts":{
+    "up_nodes":"",
+    "down_nodes":""
+  },
+  "hoststatus":[
+    {
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_state":"0",
+      "last_hard_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_check":"1327385564",
+      "current_attempt":"1",
+      "last_hard_state_change":"1327362079",
+      "last_time_up":"1327385574",
+      "last_time_down":"0",
+      "last_time_unreachable":"0",
+      "is_flapping":"0",
+      "last_check":"1327385574",
+      "servicestatus":[
+      ]
+    }
+  ],
+  "servicestatus":[
+    {
+      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
+      "service_description":"HDFS Current Load",
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_attempt":"1",
+      "current_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_hard_state_change":"1327362079",
+      "last_time_ok":"1327385479",
+      "last_time_warning":"0",
+      "last_time_unknown":"0",
+      "last_time_critical":"0",
+      "last_check":"1327385574",
+      "is_flapping":"0"
+    }
+  ]
+}
+*/
+
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl
new file mode 100644
index 0000000..a5680f7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_cpu.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl -w 
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+use strict;
+use Net::SNMP;
+use Getopt::Long;
+
+# Variable
+my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
+my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
+my $o_host = 	undef;
+my $o_community = undef;
+my $o_warn=	undef;
+my $o_crit=	undef;
+my $o_timeout = 15;
+my $o_port = 161;
+
+sub Usage {
+    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
+}
+
+Getopt::Long::Configure ("bundling");
+GetOptions(
+  'H:s'   => \$o_host,	
+  'C:s'   => \$o_community,	
+  'c:s'   => \$o_crit,        
+  'w:s'   => \$o_warn
+          );
+if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
+  Usage();
+  exit 3;
+}
+$o_warn =~ s/\%//g; 
+$o_crit =~ s/\%//g;
+alarm ($o_timeout);
+$SIG{'ALRM'} = sub {
+ print "Unable to contact host: $o_host\n";
+ exit 3;
+};
+
+# Connect to host
+my ($session,$error);
+($session, $error) = Net::SNMP->session(
+		-hostname  => $o_host,
+		-community => $o_community,
+		-port      => $o_port,
+		-timeout   => $o_timeout
+	  );
+if (!defined($session)) {
+   printf("Error opening session: %s.\n", $error);
+   exit 3;
+}
+
+my $exit_val=undef;
+my $resultat =  (Net::SNMP->VERSION < 4) ?
+	  $session->get_table($base_proc)
+	: $session->get_table(Baseoid => $base_proc);
+
+if (!defined($resultat)) {
+   printf("ERROR: Description table : %s.\n", $session->error);
+   $session->close;
+   exit 3;
+}
+
+$session->close;
+
+my ($cpu_used,$ncpu)=(0,0);
+foreach my $key ( keys %$resultat) {
+  if ($key =~ /$proc_load/) {
+    $cpu_used += $$resultat{$key};
+    $ncpu++;
+  }
+}
+
+if ($ncpu==0) {
+  print "Can't find CPU usage information : UNKNOWN\n";
+  exit 3;
+}
+
+$cpu_used /= $ncpu;
+
+print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
+printf(" %.1f%%",$cpu_used);
+$exit_val=0;
+
+if ($cpu_used > $o_crit) {
+ print " > $o_crit% : CRITICAL\n";
+ $exit_val=2;
+} else {
+  if ($cpu_used > $o_warn) {
+   print " > $o_warn% : WARNING\n";
+   $exit_val=1;
+  }
+}
+print " < $o_warn% : OK\n" if ($exit_val eq 0);
+exit $exit_val;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php
new file mode 100644
index 0000000..dee22b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_datanode_storage.php
@@ -0,0 +1,100 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes call to master node, get the jmx-json document
+ * check the storage capacity remaining on local datanode storage
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
+  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  }  
+  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
+
+  $out_msg = "Capacity:[" . $cap_total . 
+             "], Remaining Capacity:[" . $cap_remain . 
+             "], percent_full:[" . $percent_full  . "]";
+  
+  if ($percent_full > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent_full > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php
new file mode 100644
index 0000000..19347b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_blocks.php
@@ -0,0 +1,115 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes call to master node, get the jmx-json document
+ * check whether the corrupt or missing blocks % is > threshold
+ * check_jmx -H hostaddress -p port -w 1% -c 1%
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $nn_jmx_property=$options['s'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['u'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $m_percent = 0;
+    $c_percent = 0;
+    $object = $json_array['beans'][0];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }
+    $missing_blocks = $object['MissingBlocks'];
+    $corrupt_blocks = $object['CorruptBlocks'];
+    $total_blocks = $object['BlocksTotal'];
+    if($total_blocks == 0) {
+      $m_percent = 0;
+      $c_percent = 0;
+    } else {
+      $m_percent = ($missing_blocks/$total_blocks)*100;
+      $c_percent = ($corrupt_blocks/$total_blocks)*100;
+      break;
+    }
+  }
+  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
+             ">, missing_blocks:<" . $missing_blocks .
+             ">, total_blocks:<" . $total_blocks . ">";
+
+  if ($m_percent > $crit || $c_percent > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($m_percent > $warn || $c_percent > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>
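
Like the other plugins in this patch, check_hdfs_blocks.php reports its verdict through Nagios exit codes: 0 for OK, 1 for WARNING, 2 for CRITICAL, and 3 for UNKNOWN (bad usage). A hedged sketch of how the block-health thresholds map onto those codes, with purely illustrative numbers:

    import sys

    def blocks_state(missing, corrupt, total, warn_pct, crit_pct):
        # Nagios convention: 0 = OK, 1 = WARNING, 2 = CRITICAL, 3 = UNKNOWN
        if total == 0:
            return 0
        worst = max(missing, corrupt) * 100.0 / total
        if worst > crit_pct:
            return 2
        if worst > warn_pct:
            return 1
        return 0

    # e.g. 2 corrupt blocks out of 1000 with warn=0.1% and crit=1% -> WARNING (1)
    sys.exit(blocks_state(missing=0, corrupt=2, total=1000, warn_pct=0.1, crit_pct=1.0))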

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php
new file mode 100644
index 0000000..af72723
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hdfs_capacity.php
@@ -0,0 +1,109 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the master node, gets the jmx-json document,
+ * and checks whether the % HDFS capacity used is >= the warn and critical limits.
+ * check_jmx -h hostaddress -p port -w 1 -c 1
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $percent = 0;
+    $object = $json_array['beans'][0];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }
+    $CapacityUsed = $object['CapacityUsed'];
+    $CapacityRemaining = $object['CapacityRemaining'];
+    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
+    if($CapacityTotal == 0) {
+      $percent = 0;
+    } else {
+      $percent = ($CapacityUsed/$CapacityTotal)*100;
+      break;
+    }
+  }
+  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
+             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
+
+  if ($percent >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh
new file mode 100644
index 0000000..640c077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hive_metastore_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#The uri is of the form thrift://<hostname>:<port>
+HOST=$1
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
+export JAVA_HOME=$JAVA_HOME
+out=`hcat $HCAT_URL -e "show databases" 2>&1`
+if [[ "$?" -ne 0 ]]; then
+  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
+  exit 2;
+fi
+echo "OK: Hive Metastore status OK";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh
new file mode 100644
index 0000000..076d9b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_hue_status.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+status=`/etc/init.d/hue status 2>&1`
+
+if [[ "$?" -ne 0 ]]; then
+	echo "WARNING: Hue is stopped";
+	exit 1;
+fi
+
+echo "OK: Hue is running";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
new file mode 100644
index 0000000..15c85eb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+MAPRED_LOCAL_DIRS=$1
+CRITICAL=`echo $2 | cut -d % -f 1`
+IFS=","
+for mapred_dir in $MAPRED_LOCAL_DIRS
+do
+  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
+  if [ $percent -ge $CRITICAL ]; then
+    echo "CRITICAL: MapReduce local dir is full."
+    exit 2
+  fi
+done
+echo "OK: MapReduce local dir space is available."
+exit 0
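
The shell plugin above derives the used percentage of each mapred.local.dir from df output. The same figure can be computed straight from the filesystem, as in this illustrative Python sketch; the directory list is a placeholder, and the result only approximates df's Use% column.

    import os

    def percent_used(path):
        st = os.statvfs(path)
        total = st.f_blocks * st.f_frsize
        avail = st.f_bavail * st.f_frsize   # space available to non-root users
        return 0.0 if total == 0 else (total - avail) * 100.0 / total

    for d in "/hadoop/mapred,/grid1/mapred".split(","):   # placeholder mapred.local.dir value
        print "%s: %.0f%% used" % (d, percent_used(d))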

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php
new file mode 100644
index 0000000..186166d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_name_dir_status.php
@@ -0,0 +1,93 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the namenode, gets the jmx-json document,
+ * and checks NameDirStatuses to find any offline (failed) directories
+ * check_jmx -h hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
+ */
+ 
+  include "hdp_nagios_init.php";
+
+  $options = getopt("h:p:e:k:r:t:s:");
+  //Check only for mandatory options
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+  
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if ($object['NameDirStatuses'] == "") {
+    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
+    exit(1);
+  }
+  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
+  $failed_dir_count = count($NameDirStatuses['failed']);
+  $out_msg = "CRITICAL: Offline NameNode directories: ";
+  if ($failed_dir_count > 0) {
+    foreach ($NameDirStatuses['failed'] as $key => $value) {
+      $out_msg = $out_msg . $key . ":" . $value . ", ";
+    }
+    echo $out_msg . "\n";
+    exit (2);
+  }
+  echo "OK: All NameNode directories are active" . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
+  }
+?>
\ No newline at end of file
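
One detail worth noting in check_name_dir_status.php: the NameNodeInfo bean's NameDirStatuses attribute is itself a JSON string, so the plugin decodes JSON twice. A short Python illustration of that nested decode, using an example value of the attribute:

    import json

    # Example shape of the NameNodeInfo bean; NameDirStatuses is a JSON *string*.
    bean = {"NameDirStatuses": '{"active":{"/hadoop/hdfs/namenode":"IMAGE_AND_EDITS"},"failed":{}}'}

    statuses = json.loads(bean["NameDirStatuses"])   # second decode, mirroring the PHP plugin
    failed = statuses.get("failed", {})
    if failed:
        print "CRITICAL: offline NameNode directories: " + ", ".join(failed)
    else:
        print "OK: all NameNode directories are active"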

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh
new file mode 100644
index 0000000..50b075a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_namenodes_ha.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+IFS=',' read -a namenodes <<< "$1"
+port=$2
+totalNN=${#namenodes[@]}
+activeNN=()
+standbyNN=()
+unavailableNN=()
+
+for nn in "${namenodes[@]}"
+do
+  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
+  if [ "$status" == "active" ]; then
+    activeNN[${#activeNN[*]}]="$nn"
+  elif [ "$status" == "standby" ]; then
+    standbyNN[${#standbyNN[*]}]="$nn"
+  elif [ "$status" == "" ]; then
+    unavailableNN[${#unavailableNN[*]}]="$nn"
+  fi
+done
+
+message=""
+critical=false
+
+if [ ${#activeNN[@]} -gt 1 ]; then
+  critical=true
+  message=$message" Only one NN can have HAState=active;"
+elif [ ${#activeNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Active NN available;"
+elif [ ${#standbyNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Standby NN available;"
+fi
+
+NNstats=" Active<"
+for nn in "${activeNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Standby<"
+for nn in "${standbyNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Unavailable<"
+for nn in "${unavailableNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">"
+
+if [ $critical == false ]; then
+  echo "OK: NameNode HA healthy;"$NNstats
+  exit 0
+fi
+
+echo "CRITICAL:"$message$NNstats
+exit 2
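
check_namenodes_ha.sh greps tag.HAState out of the FSNamesystem bean for every NameNode it is given. A hedged Python sketch of the same classification (the hostnames and the 50070 web port are placeholders; security and SSL are ignored here):

    import json, urllib2

    def ha_state(host, port):
        url = "http://%s:%s/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem" % (host, port)
        try:
            beans = json.load(urllib2.urlopen(url, timeout=5))["beans"]
            return beans[0].get("tag.HAState", "") if beans else ""
        except Exception:
            return ""                                 # counted as "unavailable", like the shell script

    nodes = ["nn1.example.com", "nn2.example.com"]    # placeholder NameNode pair
    states = dict((nn, ha_state(nn, 50070)) for nn in nodes)
    active = [nn for nn, s in states.items() if s == "active"]
    standby = [nn for nn, s in states.items() if s == "standby"]
    print "active=%s standby=%s" % (active, standby)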

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh
new file mode 100644
index 0000000..020b41d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_nodemanager_health.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HOST=$1
+PORT=$2
+NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
+SEC_ENABLED=$3
+export PATH="/usr/bin:$PATH"
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$4
+  NAGIOS_USER=$5
+  KINIT_PATH=$6
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+
+RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
+if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
+  echo "OK: NodeManager healthy";
+  exit 0;
+fi
+echo "CRITICAL: NodeManager unhealthy";
+exit 2;
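
The NodeManager check above decides health by substring-matching "nodeHealthy":true in the /ws/v1/node/info response. A sketch that reads the same field as JSON instead; the host is a placeholder, 8042 is assumed to be the NodeManager web port, and no Kerberos negotiation is attempted:

    import json, urllib2

    url = "http://nodemanager.example.com:8042/ws/v1/node/info"   # placeholder host/port
    info = json.load(urllib2.urlopen(url, timeout=5))
    healthy = info.get("nodeInfo", {}).get("nodeHealthy", False)
    print "OK: NodeManager healthy" if healthy else "CRITICAL: NodeManager unhealthy"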

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh
new file mode 100644
index 0000000..820ee99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_oozie_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# OOZIE_URL is of the form http://<hostname>:<port>/oozie
+HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+OOZIE_URL="http://$HOST:$PORT/oozie"
+export JAVA_HOME=$JAVA_HOME
+out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing Oozie Server status [$out]";
+  exit 2;
+fi
+echo "OK: Oozie Server status [$out]";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php
new file mode 100644
index 0000000..463f69b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_rpcq_latency.php
@@ -0,0 +1,104 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the master node, gets the jmx-json document,
+ * and checks the RPC wait time in the queue, RpcQueueTime_avg_time
+ * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
+ * Warning and Critical values are in seconds
+ * Service Name = JobTracker, NameNode, JobHistoryServer
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $master=$options['n'];
+  $warn=$options['w'];
+  $crit=$options['c'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($retcode != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  } 
+  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
+  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
+
+  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
+             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
+             "> Secs";
+
+  if ($RpcQueueTime_avg_time >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($RpcQueueTime_avg_time >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh
new file mode 100644
index 0000000..7fbc4c4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/NAGIOS/package/files/check_templeton_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# out='{"status":"ok","version":"v1"}<status_code:200>'
+HOST=$1
+PORT=$2
+VERSION=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then 
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+regex="^.*\"status\":\"ok\".*<status_code:200>$"
+out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
+if [[ $out =~ $regex ]]; then
+  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
+  echo "OK: WebHCat Server status [$out]";
+  exit 0;
+fi
+echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
+exit 2;


[34/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..9102d35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..9cf35d3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..bd1d727
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
new file mode 100644
index 0000000..b22ae5f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/regionservers.j2
@@ -0,0 +1,2 @@
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+
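
checkForFormat.sh boils down to one rule: only format when no marker directory exists and every configured NameNode directory is empty. An illustrative Python rendering of that emptiness test (the directory list is a placeholder):

    import os

    name_dirs = "/hadoop/hdfs/namenode,/grid1/hdfs/namenode"   # placeholder dfs.name.dir value
    non_empty = [d for d in name_dirs.split(",") if os.path.isdir(d) and os.listdir(d)]
    if non_empty:
        print "refusing to format, non-empty dirs: " + " ".join(non_empty)
    else:
        print "all NameNode dirs are empty; formatting would be safe"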

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
new file mode 100644
index 0000000..f8e9c1a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/files/checkWebUI.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import optparse
+import httplib
+
+#
+# Main.
+#
+def main():
+  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
+  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
+  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
+
+  (options, args) = parser.parse_args()
+  
+  hosts = options.hosts.split(',')
+  port = options.port
+
+  for host in hosts:
+    try:
+      conn = httplib.HTTPConnection(host, port)
+      # This can be modified to get a partial url part to be sent with request
+      conn.request("GET", "/")
+      httpCode = conn.getresponse().status
+      conn.close()
+    except Exception:
+      httpCode = 404
+
+    if httpCode != 200:
+      print "Cannot access WEB UI on: http://" + host + ":" + port
+      exit(1)
+      
+
+if __name__ == "__main__":
+  main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
new file mode 100644
index 0000000..eaa27cf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/datanode.py
@@ -0,0 +1,57 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_datanode import datanode
+
+
+class DataNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    datanode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    datanode(action="stop")
+
+  def config(self, env):
+    import params
+
+    datanode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.datanode_pid_file)
+
+
+if __name__ == "__main__":
+  DataNode().execute()
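
Each of these components is a resource_management Script subclass; the Ambari agent runs the script and the method matching the requested command (install, start, stop, status, ...) is invoked. The toy dispatch below is an illustration of that idea only, not Ambari's actual Script implementation:

    class MiniScript(object):
        """Illustrative stand-in for the command dispatch done by resource_management.Script."""
        def execute(self, command):
            getattr(self, command)(env=None)   # 'start' -> self.start(env), 'status' -> self.status(env), ...

    class EchoDataNode(MiniScript):
        def start(self, env):
            print "starting datanode"
        def stop(self, env):
            print "stopping datanode"
        def status(self, env):
            print "checking datanode pid file"

    EchoDataNode().execute("start")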

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py
new file mode 100644
index 0000000..ec24c7d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_client.py
@@ -0,0 +1,52 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+
+
+class HdfsClient(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+  def config(self, env):
+    import params
+
+    pass
+
+
+if __name__ == "__main__":
+  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py
new file mode 100644
index 0000000..aa7b5e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_datanode.py
@@ -0,0 +1,59 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+import os
+
+def datanode(action=None):
+  import params
+
+  if action == "configure":
+    Directory(params.dfs_domain_socket_dir,
+              recursive=True,
+              mode=0750,
+              owner=params.hdfs_user,
+              group=params.user_group)
+    Directory(os.path.dirname(params.dfs_data_dir),
+              recursive=True,
+              mode=0755)
+    Directory(params.dfs_data_dir,
+              recursive=False,
+              mode=0750,
+              owner=params.hdfs_user,
+              group=params.user_group)
+
+  if action == "start":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )
+  if action == "stop":
+    service(
+      action=action, name="datanode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_datanode_keytab_file,
+      principal=params.dfs_datanode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py
new file mode 100644
index 0000000..d8e191f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_namenode.py
@@ -0,0 +1,192 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+import urlparse
+
+
+def namenode(action=None, format=True):
+  import params
+
+  if action == "configure":
+    create_name_dirs(params.dfs_name_dir)
+
+  if action == "start":
+    if format:
+      format_namenode()
+      pass
+    service(
+      action="start", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      create_pid_dir=True,
+      create_log_dir=True,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+    # TODO: extract creating of dirs to different services
+    create_app_directories()
+    create_user_directories()
+
+  if action == "stop":
+    service(
+      action="stop", name="namenode", user=params.hdfs_user,
+      keytab=params.dfs_namenode_keytab_file,
+      principal=params.dfs_namenode_kerberos_principal
+    )
+
+  if action == "decommission":
+    decommission()
+
+def create_name_dirs(directories):
+  import params
+
+  dirs = directories.split(",")
+  Directory(dirs,
+            mode=0755,
+            owner=params.hdfs_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+
+def create_app_directories():
+  import params
+
+  hdfs_directory(name="/tmp",
+                 owner=params.hdfs_user,
+                 mode="777"
+  )
+  #mapred directories
+  if params.has_jobtracker:
+    hdfs_directory(name="/mapred",
+                   owner=params.mapred_user
+    )
+    hdfs_directory(name="/mapred/system",
+                   owner=params.mapred_user
+    )
+    #hbase directories
+  if len(params.hbase_master_hosts) != 0:
+    hdfs_directory(name=params.hbase_hdfs_root_dir,
+                   owner=params.hbase_user
+    )
+    hdfs_directory(name=params.hbase_staging_dir,
+                   owner=params.hbase_user,
+                   mode="711"
+    )
+    #hive directories
+  if len(params.hive_server_host) != 0:
+    hdfs_directory(name=params.hive_apps_whs_dir,
+                   owner=params.hive_user,
+                   mode="777"
+    )
+  if len(params.hcat_server_hosts) != 0:
+    hdfs_directory(name=params.webhcat_apps_dir,
+                   owner=params.webhcat_user,
+                   mode="755"
+    )
+  if len(params.hs_host) != 0:
+    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
+                   owner=params.mapred_user,
+                   group=params.user_group,
+                   mode="777"
+    )
+
+  pass
+
+
+def create_user_directories():
+  import params
+
+  hdfs_directory(name=params.smoke_hdfs_user_dir,
+                 owner=params.smoke_user,
+                 mode=params.smoke_hdfs_user_mode
+  )
+
+  if params.has_hive_server_host:
+    hdfs_directory(name=params.hive_hdfs_user_dir,
+                   owner=params.hive_user,
+                   mode=params.hive_hdfs_user_mode
+    )
+
+  if params.has_hcat_server_host:
+    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
+      hdfs_directory(name=params.hcat_hdfs_user_dir,
+                     owner=params.hcat_user,
+                     mode=params.hcat_hdfs_user_mode
+      )
+    hdfs_directory(name=params.webhcat_hdfs_user_dir,
+                   owner=params.webhcat_user,
+                   mode=params.webhcat_hdfs_user_mode
+    )
+
+  if params.has_oozie_server:
+    hdfs_directory(name=params.oozie_hdfs_user_dir,
+                   owner=params.oozie_user,
+                   mode=params.oozie_hdfs_user_mode
+    )
+
+
+def format_namenode(force=None):
+  import params
+
+  mark_dir = params.namenode_formatted_mark_dir
+  dfs_name_dir = params.dfs_name_dir
+  hdfs_user = params.hdfs_user
+  hadoop_conf_dir = params.hadoop_conf_dir
+
+  if True:
+    if force:
+      ExecuteHadoop('namenode -format',
+                    kinit_override=True)
+    else:
+      File('/tmp/checkForFormat.sh',
+           content=StaticFile("checkForFormat.sh"),
+           mode=0755)
+      Execute(format(
+        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
+        "{dfs_name_dir}"),
+              not_if=format("test -d {mark_dir}"),
+              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
+    Execute(format("mkdir -p {mark_dir}"))
+
+
+def decommission():
+  import params
+
+  hdfs_user = params.hdfs_user
+  conf_dir = params.hadoop_conf_dir
+
+  File(params.exclude_file_path,
+       content=Template("exclude_hosts_list.j2"),
+       owner=hdfs_user,
+       group=params.user_group
+  )
+
+  ExecuteHadoop('dfsadmin -refreshNodes',
+                user=hdfs_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
\ No newline at end of file
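
format_namenode above stays idempotent by gating checkForFormat.sh behind a marker directory (the not_if="test -d {mark_dir}" guard) and then creating that marker. A hedged sketch of the same flow outside resource_management; the paths are placeholders:

    import os, subprocess

    mark_dir = "/var/run/hadoop/hdfs/namenode/formatted"   # placeholder marker path
    if not os.path.isdir(mark_dir):                        # mirrors not_if="test -d {mark_dir}"
        subprocess.check_call(["sh", "/tmp/checkForFormat.sh", "hdfs",
                               "/etc/hadoop/conf", mark_dir, "/hadoop/hdfs/namenode"])
    subprocess.check_call(["mkdir", "-p", mark_dir])       # marker makes later restarts skip formatting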

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py
new file mode 100644
index 0000000..a943455
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/hdfs_snamenode.py
@@ -0,0 +1,53 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from utils import service
+from utils import hdfs_directory
+
+
+def snamenode(action=None, format=False):
+  import params
+
+  if action == "configure":
+    Directory(params.fs_checkpoint_dir,
+              recursive=True,
+              mode=0755,
+              owner=params.hdfs_user,
+              group=params.user_group)
+  elif action == "start":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )
+  elif action == "stop":
+    service(
+      action=action,
+      name="secondarynamenode",
+      user=params.hdfs_user,
+      create_pid_dir=True,
+      create_log_dir=True,
+      keytab=params.dfs_secondary_namenode_keytab_file,
+      principal=params.dfs_secondary_namenode_kerberos_principal
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py
new file mode 100644
index 0000000..80700c8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/namenode.py
@@ -0,0 +1,66 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_namenode import namenode
+
+
+class NameNode(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    self.config(env)
+    namenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="configure")
+    pass
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+    check_process_status(status_params.namenode_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+    namenode(action="decommission")
+    pass
+
+if __name__ == "__main__":
+  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py
new file mode 100644
index 0000000..3e0e65b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/params.py
@@ -0,0 +1,165 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+import os
+
+config = Script.get_config()
+
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#exclude file
+hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
+exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+nagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+nm_host = default("/clusterHostInfo/nm_host", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
+
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_historyserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(nagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_journalnode_hosts = not len(journalnode_hosts)  == 0
+has_zkfc_hosts = not len(zkfc_hosts)  == 0
+
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = status_params.hdfs_user
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group = "users"
+
+#hadoop params
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
+hadoop_bin = "/usr/lib/hadoop/bin"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+
+dfs_domain_socket_path = "/var/lib/hadoop-hdfs/dn_socket"
+dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
+
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+
+jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
+
+# if stack_version[0] == "2":
+#dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
+# else:
+dfs_name_dir = config['configurations']['hdfs-site']['dfs.name.dir']#","/tmp/hadoop-hdfs/dfs/name")
+
+namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
+namenode_dirs_stub_filename = "namenode_dirs_created"
+
+hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
+hbase_staging_dir = "/apps/hbase/staging"
+hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
+webhcat_apps_dir = "/apps/webhcat"
+mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
+mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
+
+if has_oozie_server:
+  oozie_hdfs_user_dir = format("/user/{oozie_user}")
+  oozie_hdfs_user_mode = 775
+if has_hcat_server_host:
+  hcat_hdfs_user_dir = format("/user/{hcat_user}")
+  hcat_hdfs_user_mode = 755
+  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
+  webhcat_hdfs_user_mode = 755
+if has_hive_server_host:
+  hive_hdfs_user_dir = format("/user/{hive_user}")
+  hive_hdfs_user_mode = 700
+smoke_hdfs_user_dir = format("/user/{smoke_user}")
+smoke_hdfs_user_mode = 770
+
+namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
+
+# if stack_version[0] == "2":
+#fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
+# else:
+fs_checkpoint_dir = config['configurations']['core-site']['fs.checkpoint.dir']#","/tmp/hadoop-hdfs/dfs/namesecondary")
+
+# if stack_version[0] == "2":
+#dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
+# else:
+dfs_data_dir = config['configurations']['hdfs-site']['dfs.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
+
+
+
+

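The params.py module above follows the pattern used by all of these stack scripts: each component script does "import params", and the module resolves every value once from Script.get_config() when it is first imported. A minimal sketch of that lookup pattern, reusing only the resource_management helpers already shown in this file (default() for optional cluster-host lists, format() for interpolating names from the enclosing scope); it is illustrative only and needs the Ambari agent's command context to actually run:

  from resource_management import *

  config = Script.get_config()

  # Optional lookups fall back to a default instead of raising KeyError
  oozie_servers = default("/clusterHostInfo/oozie_server", [])
  has_oozie_server = len(oozie_servers) > 0

  # format() interpolates names visible in the surrounding scope
  oozie_user = config['configurations']['global']['oozie_user']
  if has_oozie_server:
    oozie_hdfs_user_dir = format("/user/{oozie_user}")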
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py
new file mode 100644
index 0000000..5cd264b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/service_check.py
@@ -0,0 +1,106 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+class HdfsServiceCheck(Script):
+  def service_check(self, env):
+    import params
+
+    env.set_params(params)
+    unique = get_unique_id_and_date()
+    dir = '/tmp'
+    tmp_file = format("{dir}/{unique}")
+
+    safemode_command = "dfsadmin -safemode get | grep OFF"
+
+    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
+    test_dir_exists = format("hadoop fs -test -e {dir}")
+    cleanup_cmd = format("fs -rm {tmp_file}")
+    #cleanup is run first to handle retries; if retrying there will be a stale
+    #file that needs cleanup; the exit code is that of the second command
+    create_file_cmd = format(
+      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
+    test_cmd = format("fs -test -e {tmp_file}")
+    if params.security_enabled:
+      Execute(format(
+        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
+        "{smoke_user}'"))
+    ExecuteHadoop(safemode_command,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=15,
+                  tries=20
+    )
+    ExecuteHadoop(create_dir_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  not_if=test_dir_exists,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(create_file_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    ExecuteHadoop(test_cmd,
+                  user=params.smoke_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir,
+                  try_sleep=3,
+                  tries=5
+    )
+    if params.has_journalnode_hosts:
+      journalnode_port = params.journalnode_port
+      smoke_test_user = params.smoke_user
+      checkWebUIFileName = "checkWebUI.py"
+      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
+      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
+      checkWebUICmd = format(
+        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
+        "{comma_sep_jn_hosts} -p {journalnode_port}'")
+      File(checkWebUIFilePath,
+           content=StaticFile(checkWebUIFileName))
+
+      Execute(checkWebUICmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+    if params.has_zkfc_hosts:
+      pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+      pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
+      check_zkfc_process_cmd = format(
+        "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+      Execute(check_zkfc_process_cmd,
+              logoutput=True,
+              try_sleep=3,
+              tries=5
+      )
+
+
+if __name__ == "__main__":
+  HdfsServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py
new file mode 100644
index 0000000..8f682ec
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/snamenode.py
@@ -0,0 +1,64 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from hdfs_snamenode import snamenode
+
+
+class SNameNode(Script):
+  def install(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.install_packages(env)
+
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+
+    self.config(env)
+    snamenode(action="start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="stop")
+
+  def config(self, env):
+    import params
+
+    env.set_params(params)
+
+    snamenode(action="configure")
+
+  def status(self, env):
+    import status_params
+
+    env.set_params(status_params)
+
+    check_process_status(status_params.snamenode_pid_file)
+
+
+if __name__ == "__main__":
+  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py
new file mode 100644
index 0000000..4097373
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/status_params.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hdfs_user = config['configurations']['global']['hdfs_user']
+hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
+datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
+namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
+snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
+journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
+zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py
new file mode 100644
index 0000000..e28d0e9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/scripts/utils.py
@@ -0,0 +1,133 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def service(action=None, name=None, user=None, create_pid_dir=False,
+            create_log_dir=False, keytab=None, principal=None):
+  import params
+
+  kinit_cmd = "true"
+  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
+  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
+  log_dir = format("{hdfs_log_dir_prefix}/{user}")
+  hadoop_daemon = format(
+    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
+    "{hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
+
+  if create_pid_dir:
+    Directory(pid_dir,
+              owner=user,
+              recursive=True)
+  if create_log_dir:
+    Directory(log_dir,
+              owner=user,
+              recursive=True)
+
+  if params.security_enabled:
+    principal_replaced = principal.replace("_HOST", params.hostname)
+    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
+
+    if name == "datanode":
+      user = "root"
+      pid_file = format(
+        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
+
+  daemon_cmd = format("{cmd} {action} {name}")
+
+  service_is_up = format(
+    "ls {pid_file} >/dev/null 2>&1 &&"
+    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
+
+  Execute(kinit_cmd)
+  Execute(daemon_cmd,
+          user = user,
+          not_if=service_is_up
+  )
+  if action == "stop":
+    File(pid_file,
+         action="delete",
+         ignore_failures=True
+    )
+
+
+def hdfs_directory(name=None, owner=None, group=None,
+                   mode=None, recursive_chown=False, recursive_chmod=False):
+  import params
+
+  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
+  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
+
+  stub_dir = params.namenode_dirs_created_stub_dir
+  stub_filename = params.namenode_dirs_stub_filename
+  dir_absent_in_stub = format(
+    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
+  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
+  tries = 3
+  try_sleep = 10
+  dfs_check_nn_status_cmd = "true"
+
+  #if params.stack_version[0] == "2":
+  #mkdir_cmd = format("fs -mkdir -p {name}")
+  #else:
+  mkdir_cmd = format("fs -mkdir {name}")
+
+  if params.security_enabled:
+    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
+            user = params.hdfs_user)
+  ExecuteHadoop(mkdir_cmd,
+                try_sleep=try_sleep,
+                tries=tries,
+                not_if=format(
+                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "{dir_exists} && ! {namenode_safe_mode_off}"),
+                only_if=format(
+                  "su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
+                  "! {dir_exists}'"),
+                conf_dir=params.hadoop_conf_dir,
+                user=params.hdfs_user
+  )
+  Execute(record_dir_in_stub,
+          user=params.hdfs_user,
+          only_if=format("! {dir_absent_in_stub}")
+  )
+
+  recursive = "-R" if recursive_chown else ""
+  perm_cmds = []
+
+  if owner:
+    chown = owner
+    if group:
+      chown = format("{owner}:{group}")
+    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
+  if mode:
+    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
+  for cmd in perm_cmds:
+    ExecuteHadoop(cmd,
+                  user=params.hdfs_user,
+                  only_if=format("su - hdfs -c '{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}'"),
+                  try_sleep=try_sleep,
+                  tries=tries,
+                  conf_dir=params.hadoop_conf_dir
+    )
+
+
+

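For context, the two helpers defined in utils.py above are what the rest of this package builds on: service() wraps hadoop-daemon.sh start/stop with pid-file guards, and hdfs_directory() creates, chowns and chmods an HDFS path once, recording it in the stub file. A minimal sketch of how a component script calls them, mirroring the calls already present in hdfs_snamenode.py and hdfs_namenode.py in this patch (no names are introduced beyond what params.py defines):

  import params
  from utils import service, hdfs_directory

  # Start the SecondaryNameNode exactly as hdfs_snamenode.py does
  service(action="start",
          name="secondarynamenode",
          user=params.hdfs_user,
          create_pid_dir=True,
          create_log_dir=True,
          keytab=params.dfs_secondary_namenode_keytab_file,
          principal=params.dfs_secondary_namenode_kerberos_principal)

  # Create an HDFS home directory once the NameNode is out of safemode
  hdfs_directory(name=params.smoke_hdfs_user_dir,
                 owner=params.smoke_user,
                 mode=params.smoke_hdfs_user_mode)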
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..c3af46e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HDFS/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}
\ No newline at end of file

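This three-line Jinja2 template is what decommission() in hdfs_namenode.py (earlier in this patch) renders into the dfs.hosts.exclude file via Template("exclude_hosts_list.j2"): it emits one excluded DataNode host per line from the hdfs_exclude_file list defined in params.py. A standalone illustration of the rendering, using the jinja2 library directly instead of the resource_management Template wrapper; the host names are made up:

  from jinja2 import Template

  exclude_template = Template(
    "{% for host in hdfs_exclude_file %}\n{{host}}\n{% endfor %}",
    trim_blocks=True)

  # Prints one host per line, as the generated exclude file would contain
  print(exclude_template.render(hdfs_exclude_file=["dn1.example.com",
                                                   "dn2.example.com"]))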
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh
new file mode 100644
index 0000000..8d31b91
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/addMysqlUser.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+mysqldservice=$1
+mysqldbuser=$2
+mysqldbpasswd=$3
+mysqldbhost=$4
+myhostname=$(hostname -f)
+
+service $mysqldservice start
+echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
+mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
+if [ -z "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep "$mysqldbuser")" ]; then
+  echo "Adding user $mysqldbuser@$myhostname";
+  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
+  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
+fi
+mysql -u root -e "flush privileges;"
+service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh
new file mode 100644
index 0000000..9e7b33f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hcatSmoke.sh
@@ -0,0 +1,35 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+
+case "$2" in
+
+prepare)
+  hcat -e "show tables"
+  hcat -e "drop table IF EXISTS ${tablename}"
+  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
+;;
+
+cleanup)
+  hcat -e "drop table IF EXISTS ${tablename}"
+;;
+
+esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh
new file mode 100644
index 0000000..7e03524
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveSmoke.sh
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+export tablename=$1
+echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
+echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql
new file mode 100644
index 0000000..99a3865
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2.sql
@@ -0,0 +1,23 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
+DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh
new file mode 100644
index 0000000..051a21e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/hiveserver2Smoke.sh
@@ -0,0 +1,31 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e "!run $2" 2>&1| awk '{print}'|grep Error`
+
+if [ "x$smokeout" == "x" ]; then
+  echo "Smoke test of hiveserver2 passed"
+  exit 0
+else
+  echo "Smoke test of hiveserver2 wasnt passed"
+  echo $smokeout
+  exit 1
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh
new file mode 100644
index 0000000..2e90ac0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh
new file mode 100644
index 0000000..fa90c2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startHiveserver2.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
+echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh
new file mode 100644
index 0000000..9350776
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/files/startMetastore.sh
@@ -0,0 +1,22 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
+echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py
new file mode 100644
index 0000000..2993d3a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hcat():
+  import params
+
+  Directory(params.hcat_conf_dir,
+            owner=params.hcat_user,
+            group=params.user_group,
+  )
+
+  Directory(params.hcat_pid_dir,
+            owner=params.webhcat_user,
+            recursive=True
+  )
+
+  hcat_TemplateConfig('hcat-env.sh')
+
+
+def hcat_TemplateConfig(name):
+  import params
+
+  TemplateConfig(format("{hcat_conf_dir}/{name}"),
+                 owner=params.hcat_user,
+                 group=params.user_group
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py
new file mode 100644
index 0000000..54a8937
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from hcat import hcat
+
+class HCatClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    hcat()
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+
+if __name__ == "__main__":
+  HCatClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py
new file mode 100644
index 0000000..5112e99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hcat_service_check.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hcat_service_check():
+    import params
+
+    unique = get_unique_id_and_date()
+    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
+    test_cmd = format("fs -test -e {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format(
+        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
+    else:
+      kinit_cmd = ""
+
+    File('/tmp/hcatSmoke.sh',
+         content=StaticFile("hcatSmoke.sh"),
+         mode=0755
+    )
+
+    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
+
+    Execute(prepare_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True)
+
+    ExecuteHadoop(test_cmd,
+                  user=params.hdfs_user,
+                  logoutput=True,
+                  conf_dir=params.hadoop_conf_dir)
+
+    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
+
+    Execute(cleanup_cmd,
+            tries=3,
+            user=params.smokeuser,
+            try_sleep=5,
+            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
+            logoutput=True
+    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py
new file mode 100644
index 0000000..b37ebb2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+
+def hive(name=None):
+  import params
+
+  if name == 'metastore' or name == 'hiveserver2':
+    hive_config_dir = params.hive_server_conf_dir
+    config_file_mode = 0600
+    jdbc_connector()
+  else:
+    hive_config_dir = params.hive_conf_dir
+    config_file_mode = 0644
+
+  Directory(hive_config_dir,
+            owner=params.hive_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  XmlConfig("hive-site.xml",
+            conf_dir=hive_config_dir,
+            configurations=params.config['configurations']['hive-site'],
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=config_file_mode
+  )
+
+  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
+               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
+
+  Execute(cmd,
+          not_if=format("[ -f {check_db_connection_jar_name}]"))
+
+  if name == 'metastore':
+    File(params.start_metastore_path,
+         mode=0755,
+         content=StaticFile('startMetastore.sh')
+    )
+
+  elif name == 'hiveserver2':
+    File(params.start_hiveserver2_path,
+         mode=0755,
+         content=StaticFile('startHiveserver2.sh')
+    )
+
+  if name != "client":
+    crt_directory(params.hive_pid_dir)
+    crt_directory(params.hive_log_dir)
+    crt_directory(params.hive_var_lib)
+
+  File(format("{hive_config_dir}/hive-env.sh"),
+       owner=params.hive_user,
+       group=params.user_group,
+       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
+  )
+
+  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
+  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
+  crt_file(format("{hive_conf_dir}/hive-exec-log4j.properties.template"))
+  crt_file(format("{hive_conf_dir}/hive-log4j.properties.template"))
+
+
+def crt_directory(name):
+  import params
+
+  Directory(name,
+            recursive=True,
+            owner=params.hive_user,
+            group=params.user_group,
+            mode=0755)
+
+
+def crt_file(name):
+  import params
+
+  File(name,
+       owner=params.hive_user,
+       group=params.user_group
+  )
+
+
+def jdbc_connector():
+  import params
+
+  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
+    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            creates=params.target,
+            path=["/bin", "usr/bin/"])
+
+  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+    cmd = format(
+      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
+      "cp {driver_curl_target} {target}")
+
+    Execute(cmd,
+            not_if=format("test -f {target}"),
+            path=["/bin", "usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py
new file mode 100644
index 0000000..0a5fb2b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+import sys
+from resource_management import *
+
+from hive import hive
+
+class HiveClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='client')
+
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py
new file mode 100644
index 0000000..c741174
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_metastore.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveMetastore(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='metastore')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'metastore',
+                   action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'metastore',
+                   action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    # Check the Hive Metastore process via its pid file
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py
new file mode 100644
index 0000000..3ad81a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_server.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hive import hive
+from hive_service import hive_service
+
+class HiveServer(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hive(name='hiveserver2')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    hive_service( 'hiveserver2',
+                  action = 'start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hive_service( 'hiveserver2',
+                  action = 'stop'
+    )
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    # Check that the HiveServer2 process recorded in the pid file is running
+    check_process_status(pid_file)
+
+if __name__ == "__main__":
+  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py
new file mode 100644
index 0000000..e8d4e5c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/hive_service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def hive_service(
+    name,
+    action='start'):
+
+  import params
+
+  if name == 'metastore':
+    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
+    cmd = format(
+      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
+  elif name == 'hiveserver2':
+    pid_file = format("{hive_pid_dir}/{hive_pid}")
+    cmd = format(
+      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
+
+  if action == 'start':
+    demon_cmd = format("{cmd}")
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(demon_cmd,
+            user=params.hive_user,
+            not_if=no_op_test
+    )
+
+    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+      db_connection_check_command = format(
+        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
+      Execute(db_connection_check_command,
+              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
+
+  elif action == 'stop':
+    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
+    Execute(demon_cmd)
+
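The start branch relies on Execute's not_if guard for idempotency: the daemon command is skipped whenever the pid file already points at a live process. A reduced sketch of the pattern, with placeholder paths and a hypothetical start command:

  from resource_management import *

  pid_file = '/var/run/hive/hive.pid'      # placeholder path
  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
  Execute('/usr/lib/hive/bin/start-daemon.sh',   # hypothetical start command
          user='hive',
          not_if=no_op_test)               # skipped when the test exits 0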

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py
new file mode 100644
index 0000000..a45d310
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_server.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from mysql_service import mysql_service
+
+class MysqlServer(Script):
+
+  if System.get_instance().platform == "suse":
+    daemon_name = 'mysql'
+  else:
+    daemon_name = 'mysqld'
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
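+    # Bring MySQL up just long enough to create the Hive metastore user with
+    # addMysqlUser.sh, then stop it again; start() performs the normal startup.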
+    mysql_service(daemon_name=self.daemon_name, action='start')
+
+    File(params.mysql_adduser_path,
+         mode=0755,
+         content=StaticFile('addMysqlUser.sh')
+    )
+
+    # Passing the command as a tuple lets Execute quote each argument itself,
+    # so the metastore password is not interpolated through a shell string.
+    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
+           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd), params.mysql_host[0])
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+    mysql_service(daemon_name=self.daemon_name, action='stop')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    mysql_service(daemon_name=self.daemon_name, action = 'stop')
+
+  def status(self, env):
+    mysql_service(daemon_name=self.daemon_name, action = 'status')
+
+if __name__ == "__main__":
+  MysqlServer().execute()


[04/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/storm.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/storm.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/storm.py
new file mode 100644
index 0000000..d38909c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/storm.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from yaml_config import yaml_config
+import sys
+
+def storm():
+  import params
+
+  Directory([params.log_dir, params.pid_dir, params.local_dir],
+            owner=params.storm_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  yaml_config( "storm.yaml",
+               conf_dir = params.conf_dir,
+               configurations = params.config['configurations']['storm-site'],
+               owner = params.storm_user,
+               group = params.user_group
+  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/supervisor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/supervisor.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/supervisor.py
new file mode 100644
index 0000000..eafb48f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/supervisor.py
@@ -0,0 +1,63 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from yaml_config import yaml_config
+from storm import storm
+from service import service
+
+
+class Supervisor(Script):
+  def install(self, env):
+    self.install_packages(env)
+    # TODO remove
+    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
+            ignore_failures = True)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("supervisor", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("supervisor", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+
+    check_process_status(status_params.pid_supervisor)
+
+
+if __name__ == "__main__":
+  Supervisor().execute()
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/ui_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/ui_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/ui_server.py
new file mode 100644
index 0000000..58deec6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/ui_server.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+
+
+class UiServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    # TODO remove
+    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
+            ignore_failures = True)
+
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("ui", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("ui", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_ui)
+
+if __name__ == "__main__":
+  UiServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/yaml_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/yaml_config.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/yaml_config.py
new file mode 100644
index 0000000..1f56486
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/yaml_config.py
@@ -0,0 +1,49 @@
+import re
+from resource_management import *
+
+def escape_yaml_propetry(value):
+  unquouted = False
+  unquouted_values = ["null","Null","NULL","true","True","TRUE","false","False","FALSE","YES","Yes","yes","NO","No","no","ON","On","on","OFF","Off","off"]
+  
+  if value in unquouted_values:
+    unquouted = True
+
+  # if is list [a,b,c]
+  if re.match('^\w*\[.+\]\w*$', value):
+    unquouted = True
+    
+  try:
+    int(value)
+    unquouted = True
+  except ValueError:
+    pass
+  
+  try:
+    float(value)
+    unquouted = True
+  except ValueError:
+    pass
+  
+  if not unquouted:
+    value = value.replace("'","''")
+    value = "'"+value+"'"
+    
+  return value
+
+def yaml_config(
+  filename,
+  configurations = None,
+  conf_dir = None,
+  mode = None,
+  owner = None,
+  group = None
+):
+    config_content = InlineTemplate('''{% for key, value in configurations_dict.items() %}{{ key }}: {{ escape_yaml_propetry(value) }}
+{% endfor %}''', configurations_dict=configurations, extra_imports=[escape_yaml_propetry])
+
+    File (format("{conf_dir}/{filename}"),
+      content = config_content,
+      owner = owner,
+      group = group,
+      mode = mode
+    )
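For orientation, escape_yaml_propetry() leaves YAML booleans, numbers and list literals bare and single-quotes everything else, doubling any embedded quotes. Given an illustrative storm-site map such as {'nimbus.host': 'c6401', 'storm.zookeeper.port': '2181', 'nimbus.childopts': '-Xmx1024m'}, the template above would render roughly:

  nimbus.host: 'c6401'
  storm.zookeeper.port: 2181
  nimbus.childopts: '-Xmx1024m'

(the host and childopts values fail the int/float checks and are quoted, while the port parses as an integer and stays bare).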

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
new file mode 100644
index 0000000..39b901e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/configuration/webhcat-site.xml
@@ -0,0 +1,126 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!-- 
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+-->
+
+<!-- The default settings for Templeton. -->
+<!-- Edit templeton-site.xml to change settings for your local -->
+<!-- install. -->
+
+<configuration>
+
+  <property>
+    <name>templeton.port</name>
+      <value>50111</value>
+    <description>The HTTP port for the main server.</description>
+  </property>
+
+  <property>
+    <name>templeton.hadoop.conf.dir</name>
+    <value>/etc/hadoop/conf</value>
+    <description>The path to the Hadoop configuration.</description>
+  </property>
+
+  <property>
+    <name>templeton.jar</name>
+    <value>/usr/lib/hcatalog/share/webhcat/svr/webhcat.jar</value>
+    <description>The path to the Templeton jar file.</description>
+  </property>
+
+  <property>
+    <name>templeton.libjars</name>
+    <value>/usr/lib/zookeeper/zookeeper.jar</value>
+    <description>Jars to add to the classpath.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.hadoop</name>
+    <value>/usr/bin/hadoop</value>
+    <description>The path to the Hadoop executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.archive</name>
+    <value>hdfs:///apps/webhcat/pig.tar.gz</value>
+    <description>The path to the Pig archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.pig.path</name>
+    <value>pig.tar.gz/pig/bin/pig</value>
+    <description>The path to the Pig executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hcat</name>
+    <value>/usr/bin/hcat</value>
+    <description>The path to the hcatalog executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.archive</name>
+    <value>hdfs:///apps/webhcat/hive.tar.gz</value>
+    <description>The path to the Hive archive.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.path</name>
+    <value>hive.tar.gz/hive/bin/hive</value>
+    <description>The path to the Hive executable.</description>
+  </property>
+
+  <property>
+    <name>templeton.hive.properties</name>
+    <value></value>
+    <description>Properties to set when running hive.</description>
+  </property>
+
+
+  <property>
+    <name>templeton.zookeeper.hosts</name>
+    <value>localhost:2181</value>
+    <description>ZooKeeper servers, as comma separated host:port pairs</description>
+  </property>
+
+  <property>
+    <name>templeton.storage.class</name>
+    <value>org.apache.hive.hcatalog.templeton.tool.ZooKeeperStorage</value>
+    <description>The class to use as storage</description>
+  </property>
+
+  <property>
+   <name>templeton.override.enabled</name>
+   <value>false</value>
+   <description>
+     Enable the override path in templeton.override.jars
+   </description>
+ </property>
+
+ <property>
+    <name>templeton.streaming.jar</name>
+    <value>hdfs:///apps/webhcat/hadoop-streaming.jar</value>
+    <description>The hdfs path to the Hadoop streaming jar file.</description>
+  </property> 
+
+  <property>
+    <name>templeton.exec.timeout</name>
+    <value>60000</value>
+    <description>Timeout for the Templeton API.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
new file mode 100644
index 0000000..bf08814
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/metainfo.xml
@@ -0,0 +1,65 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>WEBHCAT</name>
+      <comment>This is a comment for the WEBHCAT service</comment>
+      <version>0.12.0.2.1.1</version>
+      <components>
+        <component>
+          <name>WEBHCAT_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/webhcat_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hcatalog</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-hive</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>webhcat-tar-pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+      <configuration-dependencies>
+        <config-type>webhcat-site</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh
new file mode 100644
index 0000000..cefc4f0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/files/templetonSmoke.sh
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+  kinitcmd=""
+fi
+
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
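+# NOTE: the exit above ends the smoke test after the status check, so the
+# hcat ddl and pig submissions below are currently unreachable.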
+
+#try hcat ddl command
+echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# NOT SURE?? SUHAS
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+
+#create, copy post args file
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
+
+#submit pig query
+cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py
new file mode 100644
index 0000000..60b52a7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/params.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+webhcat_user = config['configurations']['global']['webhcat_user']
+download_url = config['configurations']['global']['apache_artifacts_download_url']
+
+config_dir = '/etc/hcatalog/conf'
+
+templeton_log_dir = config['configurations']['global']['hcat_log_dir']
+templeton_pid_dir = status_params.templeton_pid_dir
+
+pid_file = status_params.pid_file
+
+hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+hadoop_home = '/usr'
+user_group = config['configurations']['global']['user_group']
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py
new file mode 100644
index 0000000..58b4d25
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/service_check.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+class WebHCatServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File('/tmp/templetonSmoke.sh',
+         content= StaticFile('templetonSmoke.sh'),
+         mode=0755
+    )
+
+    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+                 " {security_enabled} {kinit_path_local}",
+                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True)
+
+if __name__ == "__main__":
+  WebHCatServiceCheck().execute()
\ No newline at end of file
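One detail in the format() call above: placeholders are normally resolved from the surrounding scope, but they can also be passed explicitly as keyword arguments, which is how smokeuser_keytab receives either the real keytab path or the "no_keytab" placeholder. A minimal illustration with made-up argument values:

  cmd = format("sh /tmp/templetonSmoke.sh {host} {user} {keytab}",
               host='c6401.ambari.apache.org', user='ambari-qa', keytab='no_keytab')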

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py
new file mode 100644
index 0000000..21dde6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
+pid_file = format('{templeton_pid_dir}/webhcat.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py
new file mode 100644
index 0000000..c013624
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import sys
+
+
+def webhcat():
+  import params
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.config_dir,
+            owner=params.webhcat_user,
+            group=params.user_group)
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+  )
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=Template('webhcat-env.sh.j2')
+  )
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  copyFromLocal(path='/usr/lib/hadoop-mapreduce/hadoop-streaming*.jar',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
+                kinnit_if_needed=kinit_if_needed
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
+  )
+
+
+def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinnit_if_needed=""):
+  import params
+
+  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
+  unless_cmd = format("{kinnit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
+
+  ExecuteHadoop(copy_cmd,
+                not_if=unless_cmd,
+                user=owner,
+                conf_dir=params.hadoop_conf_dir)
+
+  if not owner:
+    chown = None
+  else:
+    if not group:
+      chown = owner
+    else:
+      chown = format('{owner}:{group}')
+
+  if chown:
+    chown_cmd = format("fs -chown {chown} {dest_dir}")
+
+    ExecuteHadoop(chown_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)
+
+  if mode:
+    chmod_cmd = format('fs -chmod {mode} {dest_dir}')
+
+    ExecuteHadoop(chmod_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)
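copyFromLocal() uses the same idempotent-upload pattern seen elsewhere in these scripts: ExecuteHadoop runs `fs -copyFromLocal` only when the unless_cmd probe (`hadoop fs -ls <dest>`) fails, i.e. when the artifact is not already in HDFS. A trimmed sketch with placeholder paths:

  copy_cmd   = format("fs -copyFromLocal {path} {dest_dir}")
  unless_cmd = format("hadoop fs -ls {dest_dir} >/dev/null 2>&1")
  ExecuteHadoop(copy_cmd,
                not_if=unless_cmd,     # skip when the destination already exists
                user=owner,
                conf_dir=params.hadoop_conf_dir)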

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py
new file mode 100644
index 0000000..4365111
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_server.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+
+class WebHCatServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    webhcat_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+
+if __name__ == "__main__":
+  WebHCatServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py
new file mode 100644
index 0000000..12c3854
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/scripts/webhcat_service.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+def webhcat_service(action='start'):
+  import params
+
+  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
+
+  if action == 'start':
+    demon_cmd = format('{cmd} start')
+    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
+    Execute(demon_cmd,
+            user=params.webhcat_user,
+            not_if=no_op_test
+    )
+  elif action == 'stop':
+    demon_cmd = format('{cmd} stop')
+    Execute(demon_cmd,
+            user=params.webhcat_user
+    )
+    Execute(format('rm -f {pid_file}'))

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2
new file mode 100644
index 0000000..9ea4a79
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/WEBHCAT/package/templates/webhcat-env.sh.j2
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The file containing the running pid
+PID_FILE={{pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=/usr/lib/hadoop

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
new file mode 100644
index 0000000..4a19779
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/capacity-scheduler.xml
@@ -0,0 +1,128 @@
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<configuration>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-applications</name>
+    <value>10000</value>
+    <description>
+      Maximum number of applications that can be pending and running.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.maximum-am-resource-percent</name>
+    <value>0.2</value>
+    <description>
+      Maximum percent of resources in the cluster which can be used to run 
+      application masters i.e. controls number of concurrent running
+      applications.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.queues</name>
+    <value>default</value>
+    <description>
+      The queues at this level (root is the root queue).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.capacity</name>
+    <value>100</value>
+    <description>
+      The total capacity as a percentage out of 100 for this queue.
+      If it has child queues then this includes their capacity as well.
+      The child queues capacity should add up to their parent queue's capacity
+      or less.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.capacity</name>
+    <value>100</value>
+    <description>Default queue target capacity.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.user-limit-factor</name>
+    <value>1</value>
+    <description>
+      Default queue user limit, as a percentage from 0.0 to 1.0.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.maximum-capacity</name>
+    <value>100</value>
+    <description>
+      The maximum capacity of the default queue. 
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.state</name>
+    <value>RUNNING</value>
+    <description>
+      The state of the default queue. State can be one of RUNNING or STOPPED.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_submit_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can submit jobs to the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.default.acl_administer_jobs</name>
+    <value>*</value>
+    <description>
+      The ACL of who can administer jobs on the default queue.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.capacity.root.acl_administer_queues</name>
+    <value>*</value>
+    <description>
+      The ACL for who can administer this queue i.e. change sub-queue 
+      allocations.
+    </description>
+  </property>
+  
+  <property>
+    <name>yarn.scheduler.capacity.root.unfunded.capacity</name>
+    <value>50</value>
+    <description>
+      No description
+    </description>
+  </property>
+  <property>
+    <name>yarn.scheduler.capacity.node-locality-delay</name>
+    <value>40</value>
+    <description>
+      No description
+    </description>
+  </property>
+
+
+</configuration>
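As a quick sanity check of the numbers above: with yarn.scheduler.capacity.maximum-am-resource-percent at 0.2, a cluster offering, say, 100 GB of YARN memory would reserve at most roughly 20 GB for ApplicationMasters, which in turn caps how many applications can run concurrently; the single default queue receives 100% of root's capacity and may also grow to 100% (its maximum-capacity).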

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml
new file mode 100644
index 0000000..3a2af49
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/core-site.xml
@@ -0,0 +1,20 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<configuration>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml
new file mode 100644
index 0000000..429c39f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/global.xml
@@ -0,0 +1,88 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>rm_host</name>
+    <value></value>
+    <description>ResourceManager.</description>
+  </property>
+  <property>
+    <name>nm_hosts</name>
+    <value></value>
+    <description>List of NodeManager Hosts.</description>
+  </property>
+  <property>
+    <name>yarn_log_dir_prefix</name>
+    <value>/var/log/hadoop-yarn</value>
+    <description>YARN Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_pid_dir_prefix</name>
+    <value>/var/run/hadoop-yarn</value>
+    <description>YARN PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>yarn_user</name>
+    <value>yarn</value>
+    <description>YARN User</description>
+  </property>
+  <property>
+    <name>yarn_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize, in MB, for all YARN components</description>
+  </property>
+  <property>
+    <name>resourcemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize, in MB, for the ResourceManager</description>
+  </property>
+  <property>
+    <name>nodemanager_heapsize</name>
+    <value>1024</value>
+    <description>Max heapsize, in MB, for the NodeManager</description>
+  </property>
+
+  <!--MAPREDUCE2-->
+
+  <property>
+    <name>hs_host</name>
+    <value></value>
+    <description>History Server.</description>
+  </property>
+  <property>
+    <name>mapred_log_dir_prefix</name>
+    <value>/var/log/hadoop-mapreduce</value>
+    <description>Mapreduce Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_pid_dir_prefix</name>
+    <value>/var/run/hadoop-mapreduce</value>
+    <description>Mapreduce PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>mapred_user</name>
+    <value>mapred</value>
+    <description>Mapreduce User</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
new file mode 100644
index 0000000..ce12380
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-queue-acls.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- mapred-queue-acls.xml -->
+<configuration>
+
+
+<!-- queue default -->
+
+  <property>
+    <name>mapred.queue.default.acl-submit-job</name>
+    <value>*</value>
+  </property>
+
+  <property>
+    <name>mapred.queue.default.acl-administer-jobs</name>
+    <value>*</value>
+  </property>
+
+  <!-- END ACLs -->
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
new file mode 100644
index 0000000..424d216
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/mapred-site.xml
@@ -0,0 +1,381 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>mapreduce.task.io.sort.mb</name>
+    <value>200</value>
+    <description>
+      The total amount of buffer memory to use while sorting files, in megabytes.
+      By default, gives each merge stream 1MB, which should minimize seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.sort.spill.percent</name>
+    <value>0.7</value>
+    <description>
+      The soft limit in the serialization buffer. Once reached, a thread will
+      begin to spill the contents to disk in the background. Note that
+      collection will not block if this threshold is exceeded while a spill
+      is already in progress, so spills may be larger than this threshold when
+      it is set to less than .5
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.task.io.sort.factor</name>
+    <value>100</value>
+    <description>
+      The number of streams to merge at once while sorting files.
+      This determines the number of open file handles.
+    </description>
+  </property>
+
+<!-- map/reduce properties -->
+  <property>
+    <name>mapreduce.cluster.administrators</name>
+    <value> hadoop</value>
+    <description>
+      Administrators for MapReduce applications.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.parallelcopies</name>
+    <value>30</value>
+    <description>
+      The default number of parallel transfers run by reduce during
+      the copy(shuffle) phase.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some map tasks
+      may be executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.speculative</name>
+    <value>false</value>
+    <description>
+      If true, then multiple instances of some reduce tasks may be
+      executed in parallel.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.job.reduce.slowstart.completedmaps</name>
+    <value>0.05</value>
+    <description>
+      Fraction of the number of maps in the job which should be complete before
+      reduces are scheduled for the job.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.merge.percent</name>
+    <value>0.66</value>
+    <description>
+      The usage threshold at which an in-memory merge will be
+      initiated, expressed as a percentage of the total memory allocated to
+      storing in-memory map outputs, as defined by
+      mapreduce.reduce.shuffle.input.buffer.percent.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.shuffle.input.buffer.percent</name>
+    <value>0.7</value>
+    <description>
+      The percentage of memory to be allocated from the maximum heap
+      size to storing map outputs during the shuffle.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.output.compress.codec</name>
+    <value></value>
+    <description>If the map outputs are compressed, how should they be
+      compressed
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress.type</name>
+    <value>BLOCK</value>
+    <description>
+      If the job outputs are to be compressed as SequenceFiles, how should
+      they be compressed? Should be one of NONE, RECORD or BLOCK.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.input.buffer.percent</name>
+    <value>0.0</value>
+    <description>
+      The percentage of memory, relative to the maximum heap size, to
+      retain map outputs during the reduce. When the shuffle is concluded, any
+      remaining map outputs in memory must consume less than this threshold before
+      the reduce can begin.
+    </description>
+  </property>
+
+  <!-- copied from kryptonite configuration -->
+  <property>
+    <name>mapreduce.map.output.compress</name>
+    <value>false</value>
+  </property>
+
+  <property>
+    <name>mapreduce.task.timeout</name>
+    <value>300000</value>
+    <description>
+      The number of milliseconds before a task will be
+      terminated if it neither reads an input, writes an output, nor
+      updates its status string.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Map task</description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.memory.mb</name>
+    <value>1024</value>
+    <description>Virtual memory for single Reduce task</description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.keytab.file</name>
+    <!-- cluster variant -->
+    <value></value>
+    <description>The keytab for the job history server principal.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.shuffle.port</name>
+    <value>13562</value>
+    <description>
+      Default port that the ShuffleHandler will run on.
+      ShuffleHandler is a service run at the NodeManager to facilitate
+      transfers of intermediate Map outputs to requesting Reducers.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.intermediate-done-dir</name>
+    <value>/mr-history/tmp</value>
+    <description>
+      Directory where history files are written by MapReduce jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.jobhistory.done-dir</name>
+    <value>/mr-history/done</value>
+    <description>
+      Directory where history files are managed by the MR JobHistory Server.
+    </description>
+  </property>
+
+  <property>       
+    <name>mapreduce.jobhistory.address</name>
+    <value>localhost:10020</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>       
+    <name>mapreduce.jobhistory.webapp.address</name>
+    <value>localhost:19888</value>
+    <description>Enter your JobHistoryServer hostname.</description>
+  </property>
+
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+    <description>
+      The runtime framework for executing MapReduce jobs. Can be one of local,
+      classic or yarn.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.staging-dir</name>
+    <value>/user</value>
+    <description>
+      The staging dir used while submitting jobs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.resource.mb</name>
+    <value>512</value>
+    <description>The amount of memory the MR AppMaster needs.</description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.command-opts</name>
+    <value>-Xmx312m</value>
+    <description>
+      Java opts for the MR App Master processes.
+      The following symbol, if present, will be interpolated: @taskid@ is replaced
+      by current TaskID. Any other occurrences of '@' will go unchanged.
+      For example, to enable verbose gc logging to a file named for the taskid in
+      /tmp and to set the heap maximum to be a gigabyte, pass a 'value' of:
+      -Xmx1024m -verbose:gc -Xloggc:/tmp/@taskid@.gc
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.admin-command-opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+    <description>
+      Java opts for the MR App Master processes for admin purposes.
+      It will appear before the opts set by yarn.app.mapreduce.am.command-opts and
+      thus its options can be overridden by the user.
+
+      Usage of -Djava.library.path can cause programs to no longer function if
+      hadoop native libraries are used. These values should instead be set as part
+      of LD_LIBRARY_PATH in the map / reduce JVM env using the mapreduce.map.env and
+      mapreduce.reduce.env config settings.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.log.level</name>
+    <value>INFO</value>
+    <description>MR App Master process log level.</description>
+  </property>
+
+  <property>
+    <name>yarn.app.mapreduce.am.env</name>
+    <value></value>
+    <description>
+      User-added environment variables for the MR App Master
+      processes. Examples:
+      1) A=foo  This will set the env variable A to foo.
+      2) B=$B:c This will inherit the tasktracker's B env variable.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.map.child.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.reduce.child.java.opts</name>
+    <value>-Djava.net.preferIPv4Stack=true -Dhadoop.metrics.log.level=WARN</value>
+  </property>
+
+  <property>
+    <name>mapreduce.application.classpath</name>
+    <value>$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/*,$HADOOP_MAPRED_HOME/share/hadoop/mapreduce/lib/*</value>
+    <description>
+      CLASSPATH for MR applications. A comma-separated list of CLASSPATH
+      entries.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It is an
+      application-specific setting. It should not be larger than the global number
+      set by the resourcemanager; otherwise, it will be overridden. The default number is
+      set to 2, to allow at least one retry for AM.
+    </description>
+  </property>
+
+
+
+  <property>
+    <name>mapreduce.map.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of maps.
+    </description>
+  </property>
+
+
+  <property>
+    <name>mapreduce.reduce.java.opts</name>
+    <value>-Xmx756m</value>
+    <description>
+      Larger heap-size for child jvms of reduces.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.map.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the map task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.reduce.log.level</name>
+    <value>INFO</value>
+    <description>
+      The logging level for the reduce task. The allowed levels are:
+      OFF, FATAL, ERROR, WARN, INFO, DEBUG, TRACE and ALL.
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.admin.user.env</name>
+    <value>LD_LIBRARY_PATH=/usr/lib/hadoop/lib/native:/usr/lib/hadoop/lib/native/`$JAVA_HOME/bin/java -d32 -version &amp;&gt; /dev/null;if [ $? -eq 0 ]; then echo Linux-i386-32; else echo Linux-amd64-64;fi`</value>
+    <description>
+      Additional execution environment entries for map and reduce task processes.
+      This is not an additive property. You must preserve the original value if
+      you want your map and reduce tasks to have access to native libraries (compression, etc)
+    </description>
+  </property>
+
+  <property>
+    <name>mapreduce.output.fileoutputformat.compress</name>
+    <value>false</value>
+    <description>
+      Should the job outputs be compressed?
+    </description>
+  </property>
+
+</configuration>
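
The -Xmx opts above are sized to fit inside their YARN containers rather than being independent knobs: mapreduce.map.java.opts and mapreduce.reduce.java.opts must fit within mapreduce.map.memory.mb and mapreduce.reduce.memory.mb, and yarn.app.mapreduce.am.command-opts within yarn.app.mapreduce.am.resource.mb. A rough sketch of that relationship; the heap fractions below are illustrative assumptions, not values taken from this patch:

def heap_opt(container_mb, heap_fraction):
    """Size -Xmx as a fraction of the YARN container, leaving non-heap headroom."""
    return "-Xmx%dm" % int(container_mb * heap_fraction)

print(heap_opt(1024, 0.74))  # -Xmx757m, close to the -Xmx756m used for map/reduce tasks above
print(heap_opt(512, 0.61))   # -Xmx312m, the value used for the MR AppMaster above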

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
new file mode 100644
index 0000000..7d4d4fb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/configuration/yarn-site.xml
@@ -0,0 +1,337 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+  <!-- ResourceManager -->
+
+  <property>
+    <name>yarn.resourcemanager.hostname</name>
+    <value>localhost</value>
+    <description>The hostname of the RM.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.resource-tracker.address</name>
+    <value>localhost:8025</value>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.address</name>
+    <value>localhost:8030</value>
+    <description>The address of the scheduler interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.address</name>
+    <value>localhost:8050</value>
+    <description>
+      The address of the applications manager interface in the
+      RM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.admin.address</name>
+    <value>localhost:8141</value>
+    <description>The address of the RM admin interface.</description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.scheduler.class</name>
+    <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
+    <description>The class to use as the resource scheduler.</description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.minimum-allocation-mb</name>
+    <value>512</value>
+    <description>
+      The minimum allocation for every container request at the RM,
+      in MBs. Memory requests lower than this won't take effect,
+      and the specified value will get allocated at minimum.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.scheduler.maximum-allocation-mb</name>
+    <value>2048</value>
+    <description>
+      The maximum allocation for every container request at the RM,
+      in MBs. Memory requests higher than this won't take effect,
+      and will get capped to this value.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.acl.enable</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>yarn.admin.acl</name>
+    <value>*</value>
+  </property>
+
+  <!-- NodeManager -->
+
+  <property>
+    <name>yarn.nodemanager.address</name>
+    <value>0.0.0.0:45454</value>
+    <description>The address of the container manager in the NM.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.resource.memory-mb</name>
+    <value>5120</value>
+    <description>Amount of physical memory, in MB, that can be allocated
+      for containers.</description>
+  </property>
+
+  <property>
+    <name>yarn.application.classpath</name>
+    <value>/etc/hadoop/conf,/usr/lib/hadoop/*,/usr/lib/hadoop/lib/*,/usr/lib/hadoop-hdfs/*,/usr/lib/hadoop-hdfs/lib/*,/usr/lib/hadoop-yarn/*,/usr/lib/hadoop-yarn/lib/*,/usr/lib/hadoop-mapreduce/*,/usr/lib/hadoop-mapreduce/lib/*</value>
+    <description>Classpath for typical applications.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-pmem-ratio</name>
+    <value>2.1</value>
+    <description>Ratio between virtual memory to physical memory when
+      setting memory limits for containers. Container allocations are
+      expressed in terms of physical memory, and virtual memory usage
+      is allowed to exceed this allocation by this ratio.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-executor.class</name>
+    <value>org.apache.hadoop.yarn.server.nodemanager.DefaultContainerExecutor</value>
+    <description>ContainerExecutor for launching containers</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.linux-container-executor.group</name>
+    <value>hadoop</value>
+    <description>Unix group of the NodeManager</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+    <description>Auxiliary services of the NodeManager. A valid service name should only contain a-zA-Z0-9_ and can
+      not start with numbers</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/hadoop/yarn/log</value>
+    <description>
+      Where to store container logs. An application's localized log directory
+      will be found in ${yarn.nodemanager.log-dirs}/application_${appid}.
+      Individual containers' log directories will be below this, in directories
+      named container_{$contid}. Each container directory will contain the files
+      stderr, stdin, and syslog generated by that container.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/hadoop/yarn/local</value>
+    <description>
+      List of directories to store localized files in. An
+      application's localized file directory will be found in:
+      ${yarn.nodemanager.local-dirs}/usercache/${user}/appcache/application_${appid}.
+      Individual containers' work directories, called container_${contid}, will
+      be subdirectories of this.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.container-monitor.interval-ms</name>
+    <value>3000</value>
+    <description>
+      The interval, in milliseconds, for which the node manager
+      waits between two cycles of monitoring its containers' memory usage.
+    </description>
+  </property>
+
+  <!--
+  <property>
+    <name>yarn.nodemanager.health-checker.script.path</name>
+    <value>/etc/hadoop/conf/health_check_nodemanager</value>
+    <description>The health check script to run.</description>
+  </property>
+   -->
+
+  <property>
+    <name>yarn.nodemanager.health-checker.interval-ms</name>
+    <value>135000</value>
+    <description>Frequency of running node health script.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.health-checker.script.timeout-ms</name>
+    <value>60000</value>
+    <description>Script time out period.</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log.retain-second</name>
+    <value>604800</value>
+    <description>
+      Time in seconds to retain user logs. Only applicable if
+      log aggregation is disabled.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation-enable</name>
+    <value>true</value>
+    <description>Whether to enable log aggregation</description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir</name>
+    <value>/app-logs</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.remote-app-log-dir-suffix</name>
+    <value>logs</value>
+    <description>
+      The remote log dir will be created at
+      {yarn.nodemanager.remote-app-log-dir}/${user}/{thisParam}.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.log-aggregation.compression-type</name>
+    <value>gz</value>
+    <description>
+      T-file compression types used to compress aggregated logs.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.delete.debug-delay-sec</name>
+    <value>0</value>
+    <description>
+      Number of seconds after an application finishes before the nodemanager's
+      DeletionService will delete the application's localized file directory
+      and log directory.
+
+      To diagnose Yarn application problems, set this property's value large
+      enough (for example, to 600 = 10 minutes) to permit examination of these
+      directories. After changing the property's value, you must restart the
+      nodemanager in order for it to have an effect.
+
+      The roots of Yarn applications' work directories are configurable with
+      the yarn.nodemanager.local-dirs property (see above), and the roots
+      of the Yarn applications' log directories are configurable with the
+      yarn.nodemanager.log-dirs property (see also above).
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log-aggregation.retain-seconds</name>
+    <value>2592000</value>
+    <description>
+      How long to keep aggregation logs before deleting them. -1 disables.
+      Be careful: setting this too small will spam the name node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.admin-env</name>
+    <value>MALLOC_ARENA_MAX=$MALLOC_ARENA_MAX</value>
+    <description>
+      Environment variables that should be forwarded from the NodeManager's
+      environment to the container's.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.disk-health-checker.min-healthy-disks</name>
+    <value>0.25</value>
+    <description>
+      The minimum fraction of disks that must be healthy for the nodemanager
+      to launch new containers. This corresponds to both
+      yarn.nodemanager.local-dirs and yarn.nodemanager.log-dirs, i.e.,
+      if fewer healthy local-dirs (or log-dirs) are available,
+      then new containers will not be launched on this node.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.am.max-attempts</name>
+    <value>2</value>
+    <description>
+      The maximum number of application attempts. It's a global
+      setting for all application masters. Each application master can specify
+      its individual maximum number of application attempts via the API, but the
+      individual number cannot be more than the global upper bound. If it is,
+      the resourcemanager will override it. The default number is set to 2, to
+      allow at least one retry for AM.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.webapp.address</name>
+    <value>localhost:8088</value>
+    <description>
+      The address of the RM web application.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.vmem-check-enabled</name>
+    <value>false</value>
+    <description>
+      Whether virtual memory limits will be enforced for containers.
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.log.server.url</name>
+    <value>http://localhost:19888/jobhistory/logs</value>
+    <description>
+      URI for the HistoryServer's log resource
+    </description>
+  </property>
+
+  <property>
+    <name>yarn.resourcemanager.nodes.exclude-path</name>
+    <value>/etc/hadoop/conf/yarn.exclude</value>
+    <description>
+      Names a file that contains a list of hosts that are
+      not permitted to connect to the resource manager.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.
+    </description>
+  </property>
+
+</configuration>
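
Taken together, the memory settings above bound how containers are packed onto a node: with yarn.nodemanager.resource.memory-mb = 5120 and yarn.scheduler.minimum-allocation-mb = 512, a NodeManager can host at most ten minimum-sized containers, and each request is rounded up to a multiple of the minimum allocation and capped at yarn.scheduler.maximum-allocation-mb. A small sketch of that arithmetic; the rounding rule is a simplification of the scheduler's normalization, not code from this patch:

def normalize_request(requested_mb, min_mb=512, max_mb=2048):
    """Round a container memory request up to a multiple of min_mb, capped at max_mb."""
    rounded = -(-requested_mb // min_mb) * min_mb  # ceiling to a multiple of min_mb
    return min(max(rounded, min_mb), max_mb)

nm_memory_mb = 5120
print(nm_memory_mb // 512)      # 10 minimum-sized containers per NodeManager
print(normalize_request(700))   # 1024
print(normalize_request(3000))  # 2048 (capped at the maximum allocation)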

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml
new file mode 100644
index 0000000..2c2169c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metainfo.xml
@@ -0,0 +1,172 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>YARN</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.1.1</version>
+      <components>
+
+        <component>
+          <name>RESOURCEMANAGER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/resourcemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/resourcemanager.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>NODEMANAGER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/nodemanager.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>YARN_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/yarn_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-nodemanager</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-proxyserver</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-yarn-resourcemanager</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+    <service>
+      <name>MAPREDUCE2</name>
+      <comment>Apache Hadoop NextGen MapReduce (YARN)</comment>
+      <version>2.1.0.2.0.6.0</version>
+      <components>
+        <component>
+          <name>HISTORYSERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/historyserver.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>MAPREDUCE2_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/mapreduce2_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-mapreduce-historyserver</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/mapred_service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>yarn-site</config-type>
+        <config-type>capacity-scheduler</config-type>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>mapred-site</config-type>
+        <config-type>mapred-queue-acls</config-type>
+      </configuration-dependencies>
+    </service>
+
+  </services>
+</metainfo>
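
Each <commandScript> entry above names a Python script under the service's package/scripts directory (resourcemanager.py, nodemanager.py, historyserver.py, and so on). These follow the same Script subclass pattern used by the other stack scripts in this patch; a bare-bones sketch of the shape such a script takes (the class name and the start/stop/status bodies are placeholders, not the actual implementation):

from resource_management import *

class Resourcemanager(Script):
  def install(self, env):
    self.install_packages(env)
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)
    # render configuration files, create directories, etc.

  def start(self, env):
    self.configure(env)
    # launch the daemon here (placeholder)

  def stop(self, env):
    pass  # stop the daemon here (placeholder)

  def status(self, env):
    pass  # check the daemon's pid file here (placeholder)

if __name__ == "__main__":
  Resourcemanager().execute()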


[19/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/params.py
deleted file mode 100644
index 1ad1412..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/params.py
+++ /dev/null
@@ -1,79 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-oozie_user = config['configurations']['global']['oozie_user']
-smokeuser = config['configurations']['global']['smokeuser']
-conf_dir = "/etc/oozie/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-user_group = config['configurations']['global']['user_group']
-jdk_location = config['hostLevelParams']['jdk_location']
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-hadoop_prefix = "/usr"
-oozie_tmp_dir = "/var/tmp/oozie"
-oozie_hdfs_user_dir = format("/user/{oozie_user}")
-oozie_pid_dir = status_params.oozie_pid_dir
-pid_file = status_params.pid_file
-hadoop_jar_location = "/usr/lib/hadoop/"
-# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
-ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
-oozie_libext_dir = "/usr/lib/oozie/libext"
-lzo_enabled = config['configurations']['global']['lzo_enabled']
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
-oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
-smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
-oozie_keytab = config['configurations']['global']['oozie_keytab']
-
-oracle_driver_jar_name = "ojdbc6.jar"
-java_share_dir = "/usr/share/java"
-
-java_home = config['hostLevelParams']['java_home']
-oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
-oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
-oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
-oozie_log_dir = config['configurations']['global']['oozie_log_dir']
-oozie_data_dir = config['configurations']['global']['oozie_data_dir']
-oozie_lib_dir = "/var/lib/oozie/"
-oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
-
-jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
-
-if jdbc_driver_name == "com.mysql.jdbc.Driver":
-  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
-elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
-else:
-  jdbc_driver_jar = ""
-
-hostname = config["hostname"]
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-has_falcon_host = not len(falcon_host)  == 0
-falcon_home = '/usr/lib/falcon'
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/service_check.py
deleted file mode 100644
index 7c1c1f2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/service_check.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class OozieServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    # on HDP1 this file is different
-    smoke_test_file_name = 'oozieSmoke2.sh'
-
-    oozie_smoke_shell_file( smoke_test_file_name)
-  
-def oozie_smoke_shell_file(
-  file_name
-):
-  import params
-
-  File( format("/tmp/{file_name}"),
-    content = StaticFile(file_name),
-    mode = 0755
-  )
-  
-  if params.security_enabled:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
-  else:
-    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
-
-  Execute( format("/tmp/{file_name}"),
-    command   = sh_cmd,
-    tries     = 3,
-    try_sleep = 5,
-    logoutput = True
-  )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServiceCheck().execute()
-  
-if __name__ == "__main__":
-  OozieServiceCheck().execute()
-  #main()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/status_params.py
deleted file mode 100644
index c44fcf4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
-pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-env.sh.j2
deleted file mode 100644
index 270a1a8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-env.sh.j2
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/bash
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-# 
-#      http://www.apache.org/licenses/LICENSE-2.0
-# 
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#Set JAVA HOME
-export JAVA_HOME={{java_home}}
-
-# Set Oozie specific environment variables here.
-
-# Settings for the Embedded Tomcat that runs Oozie
-# Java System properties for Oozie should be specified in this variable
-#
-# export CATALINA_OPTS=
-
-# Oozie configuration file to load from Oozie configuration directory
-#
-# export OOZIE_CONFIG_FILE=oozie-site.xml
-
-# Oozie logs directory
-#
-export OOZIE_LOG={{oozie_log_dir}}
-
-# Oozie pid directory
-#
-export CATALINA_PID={{pid_file}}
-
-#Location of the data for oozie
-export OOZIE_DATA={{oozie_data_dir}}
-
-# Oozie Log4J configuration file to load from Oozie configuration directory
-#
-# export OOZIE_LOG4J_FILE=oozie-log4j.properties
-
-# Reload interval of the Log4J configuration file, in seconds
-#
-# export OOZIE_LOG4J_RELOAD=10
-
-# The port Oozie server runs
-#
-# export OOZIE_HTTP_PORT=11000
-
-# The host name Oozie server runs on
-#
-# export OOZIE_HTTP_HOSTNAME=`hostname -f`
-
-# The base URL for callback URLs to Oozie
-#
-# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
-export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-log4j.properties.j2
deleted file mode 100644
index e4a2662..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/templates/oozie-log4j.properties.j2
+++ /dev/null
@@ -1,74 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#    http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License. See accompanying LICENSE file.
-#
-
-# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
-# XLogService sets its value to '${oozie.home}/logs'
-
-log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
-log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
-log4j.appender.oozie.Append=true
-log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
-log4j.appender.oozieops.Append=true
-log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
-log4j.appender.oozieinstrumentation.Append=true
-log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
-log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
-log4j.appender.oozieaudit.Append=true
-log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
-log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
-log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
-log4j.appender.openjpa.Append=true
-log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
-log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
-
-log4j.logger.openjpa=INFO, openjpa
-log4j.logger.oozieops=INFO, oozieops
-log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
-log4j.logger.oozieaudit=ALL, oozieaudit
-log4j.logger.org.apache.oozie=INFO, oozie
-log4j.logger.org.apache.hadoop=WARN, oozie
-log4j.logger.org.mortbay=WARN, oozie
-log4j.logger.org.hsqldb=WARN, oozie
-log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/configuration/pig.properties
deleted file mode 100644
index 01000b5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/configuration/pig.properties
+++ /dev/null
@@ -1,52 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#      http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
-# see bin/pig -help
-
-# brief logging (no timestamps)
-brief=false
-
-#debug level, INFO is default
-debug=INFO
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-verbose=false
-
-#exectype local|mapreduce, mapreduce is default
-exectype=mapreduce
-
-#Enable insertion of information about script into hadoop job conf 
-pig.script.info.enabled=true
-
-#Do not spill temp files smaller than this size (bytes)
-pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-pig.exec.reducers.bytes.per.reducer=1000000000
-pig.exec.reducers.max=999
-
-#Temporary location to store the intermediate data.
-pig.temp.dir=/tmp/
-
-#Threshold for merging FRJoin fragment files
-pig.files.concatenation.threshold=100
-pig.optimistic.files.concatenation=false;
-
-pig.disable.counter=false

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/metainfo.xml
deleted file mode 100644
index b491900..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/metainfo.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>PIG</name>
-      <comment>Scripting platform for analyzing large datasets</comment>
-      <version>0.12.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>PIG</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/pig_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>pig</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/files/pigSmoke.sh
deleted file mode 100644
index a22456e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-/*Licensed to the Apache Software Foundation (ASF) under one or more
- contributor license agreements.  See the NOTICE file distributed with
- this work for additional information regarding copyright ownership.
- The ASF licenses this file to You under the Apache License, Version 2.0
- (the "License"); you may not use this file except in compliance with
- the License.  You may obtain a copy of the License at
-
-     http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License */
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/params.py
deleted file mode 100644
index 86e962c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-# server configurations
-config = Script.get_config()
-
-pig_conf_dir = "/etc/pig/conf"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hdfs_user = config['configurations']['global']['hdfs_user']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-# not supporting 32 bit jdk.
-java64_home = config['hostLevelParams']['java_home']
-hadoop_home = "/usr"
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig.py
deleted file mode 100644
index c2d7b02..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig.py
+++ /dev/null
@@ -1,46 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def pig():
-  import params
-
-  Directory( params.pig_conf_dir,
-    owner = params.hdfs_user,
-    group = params.user_group
-  )
-
-  pig_TemplateConfig( ['pig-env.sh','pig.properties','log4j.properties'])
-  
-  
-def pig_TemplateConfig(name):
-  import params
-  
-  if not isinstance(name, list):
-    name = [name]
-    
-  for x in name:
-    TemplateConfig( format("{pig_conf_dir}/{x}"),
-        owner = params.hdfs_user
-    )
-  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig_client.py
deleted file mode 100644
index acd0cb1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/pig_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-from pig import pig
-
-         
-class PigClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    pig()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-#for tests
-def main():
-  command_type = 'install'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/service_check.py
deleted file mode 100644
index 3cca087..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/scripts/service_check.py
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class PigServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    
-    input_file = 'passwd'
-    output_file = "pigsmoke.out"
-  
-    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
-    #cleanup put below to handle retries; if retrying there will be a stale file that needs cleanup; exit code is a function of the second command
-    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
-    test_cmd = format("fs -test -e {output_file}")
-  
-    ExecuteHadoop( create_file_cmd,
-      tries     = 3,
-      try_sleep = 5,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-  
-    File( '/tmp/pigSmoke.sh',
-      content = StaticFile("pigSmoke.sh"),
-      mode = 0755
-    )
-  
-    Execute( "pig /tmp/pigSmoke.sh",
-      tries     = 3,
-      try_sleep = 5,
-      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-      user      = params.smokeuser,
-      logoutput = True
-    )
-  
-    ExecuteHadoop( test_cmd,
-      user      = params.smokeuser,
-      conf_dir = params.hadoop_conf_dir
-    )
-    
-def main():
-  import sys
-  command_type = 'service_check'
-  command_data_file = '/root/workspace/Pig/input.json'
-  basedir = '/root/workspace/Pig/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  PigServiceCheck().execute()
-  
-if __name__ == "__main__":
-  #main()
-  PigServiceCheck().execute()
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/log4j.properties.j2
deleted file mode 100644
index 9ef6e2c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/log4j.properties.j2
+++ /dev/null
@@ -1,30 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# ***** Set root logger level to DEBUG and its only appender to A.
-log4j.logger.org.apache.pig=info, A
-
-# ***** A is set to be a ConsoleAppender.
-log4j.appender.A=org.apache.log4j.ConsoleAppender
-# ***** A uses PatternLayout.
-log4j.appender.A.layout=org.apache.log4j.PatternLayout
-log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig-env.sh.j2
deleted file mode 100644
index b0e17d4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig-env.sh.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig.properties.j2
deleted file mode 100644
index 6fcb233..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/PIG/package/templates/pig.properties.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Pig configuration file. All values can be overwritten by command line arguments.
-
-# log4jconf log4j configuration file
-# log4jconf=./conf/log4j.properties
-
-# a file that contains pig script
-#file=
-
-# load jarfile, colon separated
-#jar=
-
-#verbose print all log messages to screen (default to print only INFO and above to screen)
-#verbose=true
-
-#exectype local|mapreduce, mapreduce is default
-#exectype=local
-
-#pig.logfile=
-
-#Do not spill temp files smaller than this size (bytes)
-#pig.spill.size.threshold=5000000
-#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
-#This should help reduce the number of files being spilled.
-#pig.spill.gc.activation.size=40000000
-
-#the following two parameters are to help estimate the reducer number
-#pig.exec.reducers.bytes.per.reducer=1000000000
-#pig.exec.reducers.max=999
-
-#Use this option only when your Pig job will otherwise die because of
-#using more counters than the hadoop configured limit
-#pig.disable.counter=true
-hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/metainfo.xml
deleted file mode 100644
index 0070c34..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/metainfo.xml
+++ /dev/null
@@ -1,60 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>SQOOP</name>
-      <comment>Tool for transferring bulk data between Apache Hadoop and
-        structured data stores such as relational databases
-      </comment>
-      <version>1.4.4.2.0.6.0</version>
-
-      <components>
-        <component>
-          <name>SQOOP</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/sqoop_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>sqoop</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/params.py
deleted file mode 100644
index 5655131..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/params.py
+++ /dev/null
@@ -1,37 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-security_enabled = config['configurations']['global']['security_enabled']
-smokeuser = config['configurations']['global']['smokeuser']
-user_group = config['configurations']['global']['user_group']
-
-sqoop_conf_dir = "/usr/lib/sqoop/conf"
-hbase_home = "/usr"
-hive_home = "/usr"
-zoo_conf_dir = "/etc/zookeeper"
-sqoop_lib = "/usr/lib/sqoop/lib"
-sqoop_user = "sqoop"
-
-keytab_path = config['configurations']['global']['keytab_path']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/service_check.py
deleted file mode 100644
index c42501a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/service_check.py
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management import *
-
-
-class SqoopServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
-    Execute("sqoop version",
-            user = params.smokeuser,
-            logoutput = True
-    )
-
-if __name__ == "__main__":
-  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop.py
deleted file mode 100644
index 148a833..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-def sqoop(type=None):
-  import params
-  Link(params.sqoop_lib + "/mysql-connector-java.jar",
-       to = '/usr/share/java/mysql-connector-java.jar'
-  )
-  Directory(params.sqoop_conf_dir,
-            owner = params.sqoop_user,
-            group = params.user_group
-  )
-  sqoop_TemplateConfig("sqoop-env.sh")
-  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
-          owner = params.sqoop_user,
-          group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  File (params.sqoop_conf_dir + "/sqoop-site.xml",
-         owner = params.sqoop_user,
-         group = params.user_group
-  )
-  pass
-
-def sqoop_TemplateConfig(name, tag=None):
-  import params
-  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
-                  owner = params.sqoop_user,
-                  template_tag = tag
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop_client.py
deleted file mode 100644
index 6829557..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/scripts/sqoop_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from sqoop import sqoop
-
-
-class SqoopClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    sqoop(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/templates/sqoop-env.sh.j2
deleted file mode 100644
index 90cbc75..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/SQOOP/package/templates/sqoop-env.sh.j2
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# included in all the hadoop scripts with source command
-# should not be executable directly
-# also should not be passed any arguments, since we need original $*
-
-# Set Hadoop-specific environment variables here.
-
-#Set path to where bin/hadoop is available
-#Set path to where bin/hadoop is available
-export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
-
-#set the path to where bin/hbase is available
-export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
-
-#Set the path to where bin/hive is available
-export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
-
-#Set the path for where the zookeeper config dir is
-export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
-
-# add libthrift in hive to sqoop class path first so hive imports work
-export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/global.xml
deleted file mode 100644
index 5cc9170..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/global.xml
+++ /dev/null
@@ -1,39 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>storm_user</name>
-    <value>storm</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm_log_dir</name>
-    <value>/var/log/storm</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm_pid_dir</name>
-    <value>/var/run/storm</value>
-    <description></description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/storm-site.xml
deleted file mode 100644
index f81b3c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/configuration/storm-site.xml
+++ /dev/null
@@ -1,514 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>java.library.path</name>
-    <value>/usr/local/lib:/opt/local/lib:/usr/lib</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.local.dir</name>
-    <value>/hadoop/storm</value>
-    <description>The place where jars are kept</description>
-  </property>
-  <property>
-    <name>storm.zookeeper.servers</name>
-    <value>['localhost']</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.zookeeper.port</name>
-    <value>2181</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.zookeeper.root</name>
-    <value>/storm</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.zookeeper.session.timeout</name>
-    <value>20000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.zookeeper.connection.timeout</name>
-    <value>15000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.zookeeper.retry.times</name>
-    <value>5</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.zookeeper.retry.interval</name>
-    <value>1000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.zookeeper.retry.intervalceiling.millis</name>
-    <value>30000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.cluster.mode</name>
-    <value>distributed</value>
-    <description>Can be distributed or local</description>
-  </property>
-  <property>
-    <name>storm.local.mode.zmq</name>
-    <value>false</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.thrift.transport</name>
-    <value>backtype.storm.security.auth.SimpleTransportPlugin</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.transport</name>
-    <value>backtype.storm.messaging.netty.Context</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.transport</name>
-    <value>backtype.storm.messaging.netty.Context</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.host</name>
-    <value>localhost</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.thrift.port</name>
-    <value>6627</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.thrift.max_buffer_size</name>
-    <value>1048576</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.childopts</name>
-    <value>-Xmx1024m</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.task.timeout.secs</name>
-    <value>30</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.supervisor.timeout.secs</name>
-    <value>60</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.monitor.freq.secs</name>
-    <value>10</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.cleanup.inbox.freq.secs</name>
-    <value>600</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.inbox.jar.expiration.secs</name>
-    <value>3600</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.task.launch.secs</name>
-    <value>120</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.reassign</name>
-    <value>true</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.file.copy.expiration.secs</name>
-    <value>600</value>
-    <description></description>
-  </property>
-  <property>
-    <name>nimbus.topology.validator</name>
-    <value>backtype.storm.nimbus.DefaultTopologyValidator</value>
-    <description></description>
-  </property>
-  <property>
-    <name>ui.port</name>
-    <value>8744</value>
-    <description></description>
-  </property>
-  <property>
-    <name>ui.childopts</name>
-    <value>-Xmx768m</value>
-    <description></description>
-  </property>
-  <property>
-    <name>logviewer.port</name>
-    <value>8000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>logviewer.port</name>
-    <value>8000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>logviewer.childopts</name>
-    <value>-Xmx128m</value>
-    <description></description>
-  </property>
-  <property>
-    <name>logviewer.appender.name</name>
-    <value>A1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>drpc.port</name>
-    <value>3772</value>
-    <description></description>
-  </property>
-  <property>
-    <name>drpc.worker.threads</name>
-    <value>64</value>
-    <description></description>
-  </property>
-  <property>
-    <name>drpc.queue.size</name>
-    <value>128</value>
-    <description></description>
-  </property>
-  <property>
-    <name>drpc.invocations.port</name>
-    <value>3773</value>
-    <description></description>
-  </property>
-  <property>
-    <name>drpc.request.timeout.secs</name>
-    <value>600</value>
-    <description></description>
-  </property>
-  <property>
-    <name>drpc.childopts</name>
-    <value>-Xmx768m</value>
-    <description></description>
-  </property>
-  <property>
-    <name>transactional.zookeeper.root</name>
-    <value>/transactional</value>
-    <description></description>
-  </property>
-  <property>
-    <name>transactional.zookeeper.servers</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>transactional.zookeeper.port</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>supervisor.slots.ports</name>
-    <value>[6700, 6701]</value>
-    <description></description>
-  </property>
-  <property>
-    <name>supervisor.childopts</name>
-    <value>-Xmx256m</value>
-    <description></description>
-  </property>
-  <property>
-    <name>supervisor.worker.start.timeout.secs</name>
-    <value>120</value>
-    <description>How long supervisor will wait to ensure that a worker process is started</description>
-  </property>
-  <property>
-    <name>supervisor.worker.timeout.secs</name>
-    <value>30</value>
-    <description>How long between heartbeats until supervisor considers that worker dead and tries to restart it</description>
-  </property>
-  <property>
-    <name>supervisor.monitor.frequency.secs</name>
-    <value>3</value>
-    <description>How frequently the supervisor checks on the status of the processes it's monitoring and restarts if necessary</description>
-  </property>
-  <property>
-    <name>supervisor.heartbeat.frequency.secs</name>
-    <value>5</value>
-    <description>How frequently the supervisor heartbeats to the cluster state (for nimbus)</description>
-  </property>
-  <property>
-    <name>supervisor.enable</name>
-    <value>true</value>
-    <description></description>
-  </property>
-  <property>
-    <name>worker.childopts</name>
-    <value>-Xmx768m</value>
-    <description></description>
-  </property>
-  <property>
-    <name>worker.heartbeat.frequency.secs</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>task.heartbeat.frequency.secs</name>
-    <value>3</value>
-    <description></description>
-  </property>
-  <property>
-    <name>task.refresh.poll.secs</name>
-    <value>10</value>
-    <description></description>
-  </property>
-  <property>
-    <name>zmq.threads</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>zmq.linger.millis</name>
-    <value>5000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>zmq.hwm</name>
-    <value>0</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.server_worker_threads</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.client_worker_threads</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.buffer_size</name>
-    <value>5242880</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.max_retries</name>
-    <value>30</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.max_wait_ms</name>
-    <value>1000</value>
-    <description></description>
-  </property>
-  <property>
-    <name>storm.messaging.netty.min_wait_ms</name>
-    <value>100</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.enable.message.timeouts</name>
-    <value>true</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.debug</name>
-    <value>false</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.optimize</name>
-    <value>true</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.workers</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.acker.executors</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.message.timeout.secs</name>
-    <value>30</value>
-    <description>Maximum amount of time a message has to complete before it's considered failed</description>
-  </property>
-  <property>
-    <name>topology.skip.missing.kryo.registrations</name>
-    <value>false</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.max.task.parallelism</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.max.spout.pending</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.state.synchronization.timeout.secs</name>
-    <value>60</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.stats.sample.rate</name>
-    <value>0.05</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.builtin.metrics.bucket.size.secs</name>
-    <value>60</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.fall.back.on.java.serialization</name>
-    <value>true</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.worker.childopts</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.executor.receive.buffer.size</name>
-    <value>1024</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.executor.send.buffer.size</name>
-    <value>1024</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.receiver.buffer.size</name>
-    <value>8</value>
-    <description>Setting it too high causes a lot of problems (heartbeat thread gets starved, throughput plummets)</description>
-  </property>
-  <property>
-    <name>topology.transfer.buffer.size</name>
-    <value>1024</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.tick.tuple.freq.secs</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.worker.shared.thread.pool.size</name>
-    <value>4</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.disruptor.wait.strategy</name>
-    <value>com.lmax.disruptor.BlockingWaitStrategy</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.executor.send.buffer.size</name>
-    <value>1024</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.receiver.buffer.size</name>
-    <value>8</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.transfer.buffer.size</name>
-    <value>1024</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.tick.tuple.freq.secs</name>
-    <value>null</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.worker.shared.thread.pool.size</name>
-    <value>4</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.disruptor.wait.strategy</name>
-    <value>com.lmax.disruptor.BlockingWaitStrategy</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.spout.wait.strategy</name>
-    <value>backtype.storm.spout.SleepSpoutWaitStrategy</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.sleep.spout.wait.strategy.time.ms</name>
-    <value>1</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.error.throttle.interval.secs</name>
-    <value>10</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.max.error.report.per.interval</name>
-    <value>5</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.kryo.factory</name>
-    <value>backtype.storm.serialization.DefaultKryoFactory</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.tuple.serializer</name>
-    <value>backtype.storm.serialization.types.ListDelegateSerializer</value>
-    <description></description>
-  </property>
-  <property>
-    <name>topology.trident.batch.emit.interval.millis</name>
-    <value>500</value>
-    <description></description>
-  </property>
-  <property>
-    <name>dev.zookeeper.path</name>
-    <value>/tmp/dev-storm-zookeeper</value>
-    <description></description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/metainfo.xml
deleted file mode 100644
index 95dd954..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/metainfo.xml
+++ /dev/null
@@ -1,104 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>STORM</name>
-      <comment>Apache Hadoop Stream processing framework</comment>
-      <version>0.9.0.1</version>
-      <components>
-
-        <component>
-          <name>NIMBUS</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/nimbus.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>SUPERVISOR</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/supervisor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>STORM_UI_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/ui_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>DRPC_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/drpc_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>LOGVIEWER_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/logviewer_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-
-      <!--
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>storm</name>
-            </package>
-        </osSpecific>
-      </osSpecifics> -->
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>storm-site</config-type>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/files/wordCount.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/files/wordCount.jar b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/files/wordCount.jar
deleted file mode 100644
index aed64be..0000000
Binary files a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/files/wordCount.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/drpc_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/drpc_server.py
deleted file mode 100644
index 325f86a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/drpc_server.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from storm import storm
-from service import service
-from service_check import ServiceCheck
-
-
-class DrpcServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    # TODO remove
-    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
-            ignore_failures = True)
-
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    storm()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-    service("drpc", action="start")
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service("drpc", action="stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_drpc)
-
-if __name__ == "__main__":
-  DrpcServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/logviewer_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/logviewer_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/logviewer_server.py
deleted file mode 100644
index c209036..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/logviewer_server.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from storm import storm
-from service import service
-from service_check import ServiceCheck
-
-
-class LogviewerServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    # TODO remove
-    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
-            ignore_failures = True)
-
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    storm()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-    service("logviewer", action="start")
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service("logviewer", action="stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_logviewer)
-
-if __name__ == "__main__":
-  LogviewerServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/nimbus.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/nimbus.py
deleted file mode 100644
index 7210314..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/nimbus.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from storm import storm
-from service import service
-from service_check import ServiceCheck
-
-
-class Nimbus(Script):
-  def install(self, env):
-    self.install_packages(env)
-    # TODO remove
-    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
-            ignore_failures = True)
-
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    storm()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-
-    service("nimbus", action="start")
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service("nimbus", action="stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_nimbus)
-
-if __name__ == "__main__":
-  Nimbus().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/params.py
deleted file mode 100644
index 57ff774..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/params.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-storm_user = config['configurations']['global']['storm_user']
-log_dir = config['configurations']['global']['storm_log_dir']
-pid_dir = status_params.pid_dir
-conf_dir = "/etc/storm/conf"
-local_dir = config['configurations']['storm-site']['storm.local.dir']
-user_group = config['configurations']['global']['user_group']
-java64_home = config['hostLevelParams']['java_home']
-nimbus_host = config['configurations']['storm-site']['nimbus.host']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service.py
deleted file mode 100644
index 721acf1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-
-from resource_management import *
-from resource_management.core.shell import call
-import subprocess
-
-
-def service(
-    name,
-    action='start'):
-  import params
-  import status_params
-
-  pid_file = status_params.pid_files[name]
-
-  if action == "start":
-    cmd = ["/usr/bin/storm", name]
-    if name == "ui":
-      crt_pid_cmd = format("pgrep -f \"^java.+backtype.storm.ui.core$\" > {pid_file}")
-    else :
-      crt_pid_cmd = format("pgrep -f \"^java.+backtype.storm.daemon.{name}$\" > {pid_file}")
-
-    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-
-    #Execute(cmd,
-    #        not_if=no_op_test,
-    #        user=params.storm_user
-    #)
-
-    #TODO run from storm user
-
-    if call(no_op_test)[0]:
-      subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env={"PATH":format("{java64_home}/bin:/bin")})
-
-    Execute(crt_pid_cmd,
-            logoutput=True,
-            tries=6,
-            try_sleep=10
-    )
-
-  elif action == "stop":
-    cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
-    Execute(cmd)
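
For reference, assuming the cluster's storm_pid_dir is set to a typical value such as /var/run/storm (the real path comes from the global configuration, so this is only an illustrative rendering), the formatted commands for the nimbus daemon expand to roughly:

  pgrep -f "^java.+backtype.storm.daemon.nimbus$" > /var/run/storm/nimbus.pid
  ls /var/run/storm/nimbus.pid >/dev/null 2>&1 && ps `cat /var/run/storm/nimbus.pid` >/dev/null 2>&1

The second command is the no-op test: the daemon is only launched when that test fails, and the pid capture is retried up to 6 times at 10-second intervals while the JVM comes up.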

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service_check.py
deleted file mode 100644
index 1cfbc48..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/service_check.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    unique = get_unique_id_and_date()
-
-    File("/tmp/wordCount.jar",
-         content=StaticFile("wordCount.jar")
-    )
-
-    cmd = format("env PATH=$PATH:{java64_home}/bin storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")
-
-    Execute(cmd,
-            logoutput=True
-    )
-
-    Execute(format("env PATH=$PATH:{java64_home}/bin storm kill WordCount{unique}"))
-
-if __name__ == "__main__":
-  ServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/status_params.py
deleted file mode 100644
index 70b034a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/STORM/package/scripts/status_params.py
+++ /dev/null
@@ -1,35 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['storm_pid_dir']
-pid_nimbus = format("{pid_dir}/nimbus.pid")
-pid_supervisor = format("{pid_dir}/supervisor.pid")
-pid_drpc = format("{pid_dir}/drpc.pid")
-pid_ui = format("{pid_dir}/ui.pid")
-pid_logviewer = format("{pid_dir}/logviewer.pid")
-
-pid_files = {"logviewer":pid_logviewer,
-             "ui": pid_ui,
-             "nimbus": pid_nimbus,
-             "supervisor": pid_supervisor,
-             "drpc": pid_drpc}
\ No newline at end of file


[28/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/role_command_order.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/role_command_order.json b/ambari-server/src/main/resources/stacks/HDP/2.0.8/role_command_order.json
deleted file mode 100644
index 416945d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/role_command_order.json
+++ /dev/null
@@ -1,107 +0,0 @@
-{
-  "_comment" : "Record format:",
-  "_comment" : "blockedRole-blockedCommand: [blockerRole1-blockerCommand1, blockerRole2-blockerCommand2, ...]",
-  "general_deps" : {
-    "_comment" : "dependencies for all cases",
-    "NAGIOS_SERVER-INSTALL" : ["HIVE_CLIENT-INSTALL", "HCAT-INSTALL",
-        "MAPREDUCE_CLIENT-INSTALL", "OOZIE_CLIENT-INSTALL"],
-    "NIMBUS-START" : ["ZOOKEEPER_SERVER-START"],
-    "SUPERVISOR-START" : ["NIMBUS-START"],
-    "STORM_UI_SERVER-START" : ["NIMBUS-START"],
-    "DRPC_SERVER-START" : ["NIMBUS-START"],
-    "LOGVIEWER_SERVER-START" : ["NIMBUS-START"],
-    "HBASE_MASTER-START": ["ZOOKEEPER_SERVER-START"],
-    "HBASE_REGIONSERVER-START": ["HBASE_MASTER-START"],
-    "OOZIE_SERVER-START": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "WEBHCAT_SERVER-START": ["TASKTRACKER-START", "HIVE_SERVER-START"],
-    "HIVE_METASTORE-START": ["MYSQL_SERVER-START"],
-    "HIVE_SERVER-START": ["TASKTRACKER-START", "MYSQL_SERVER-START"],
-    "HUE_SERVER-START": ["HIVE_SERVER-START", "HCAT-START", "OOZIE_SERVER-START"],
-    "FLUME_SERVER-START": ["OOZIE_SERVER-START"],
-    "NAGIOS_SERVER-START": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START",
-        "GANGLIA_SERVER-START", "GANGLIA_MONITOR-START", "HCAT-START",
-        "HIVE_SERVER-START", "HIVE_METASTORE-START", "HUE_SERVER-START",
-        "JOBTRACKER-START", "TASKTRACKER-START", "ZOOKEEPER_SERVER-START",
-        "MYSQL_SERVER-START", "OOZIE_SERVER-START", "PIG-START", "SQOOP-START",
-        "WEBHCAT_SERVER-START", "FLUME_SERVER-START"],
-    "MAPREDUCE_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "OOZIE_SERVICE_CHECK-SERVICE_CHECK": ["OOZIE_SERVER-START"],
-    "WEBHCAT_SERVICE_CHECK-SERVICE_CHECK": ["WEBHCAT_SERVER-START"],
-    "HBASE_SERVICE_CHECK-SERVICE_CHECK": ["HBASE_MASTER-START", "HBASE_REGIONSERVER-START"],
-    "HIVE_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START", "HIVE_METASTORE-START"],
-    "HCAT_SERVICE_CHECK-SERVICE_CHECK": ["HIVE_SERVER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "SQOOP_SERVICE_CHECK-SERVICE_CHECK": ["JOBTRACKER-START", "TASKTRACKER-START"],
-    "ZOOKEEPER_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "ZOOKEEPER_QUORUM_SERVICE_CHECK-SERVICE_CHECK": ["ZOOKEEPER_SERVER-START"],
-    "STORM_SERVICE_CHECK-SERVICE_CHECK": ["NIMBUS-START", "SUPERVISOR-START"],
-    "ZOOKEEPER_SERVER-STOP" : ["HBASE_MASTER-STOP", "HBASE_REGIONSERVER-STOP"],
-    "HBASE_MASTER-STOP": ["HBASE_REGIONSERVER-STOP"],
-    "NIMBUS-STOP" : ["SUPERVISOR-STOP", "STORM_UI_SERVER-STOP", "DRPC_SERVER-STOP", "LOGVIEWER_SERVER-STOP"],
-    "TASKTRACKER-UPGRADE": ["JOBTRACKER-UPGRADE"],
-    "MAPREDUCE_CLIENT-UPGRADE": ["TASKTRACKER-UPGRADE", "JOBTRACKER-UPGRADE"],
-    "ZOOKEEPER_SERVER-UPGRADE": ["MAPREDUCE_CLIENT-UPGRADE"],
-    "ZOOKEEPER_CLIENT-UPGRADE": ["ZOOKEEPER_SERVER-UPGRADE"],
-    "HBASE_MASTER-UPGRADE": ["ZOOKEEPER_CLIENT-UPGRADE"],
-    "HBASE_REGIONSERVER-UPGRADE": ["HBASE_MASTER-UPGRADE"],
-    "HBASE_CLIENT-UPGRADE": ["HBASE_REGIONSERVER-UPGRADE"],
-    "HIVE_SERVER-UPGRADE" : ["HBASE_CLIENT-UPGRADE"],
-    "HIVE_METASTORE-UPGRADE" : ["HIVE_SERVER-UPGRADE"],
-    "MYSQL_SERVER-UPGRADE": ["HIVE_METASTORE-UPGRADE"],
-    "HIVE_CLIENT-UPGRADE": ["MYSQL_SERVER-UPGRADE"],
-    "HCAT-UPGRADE": ["HIVE_CLIENT-UPGRADE"],
-    "OOZIE_SERVER-UPGRADE" : ["HCAT-UPGRADE"],
-    "OOZIE_CLIENT-UPGRADE" : ["OOZIE_SERVER-UPGRADE"],
-    "WEBHCAT_SERVER-UPGRADE" : ["OOZIE_CLIENT-UPGRADE"],
-    "PIG-UPGRADE" : ["WEBHCAT_SERVER-UPGRADE"],
-    "SQOOP-UPGRADE" : ["PIG-UPGRADE"],
-    "NAGIOS_SERVER-UPGRADE" : ["SQOOP-UPGRADE"],
-    "GANGLIA_SERVER-UPGRADE" : ["NAGIOS_SERVER-UPGRADE"],
-    "GANGLIA_MONITOR-UPGRADE" : ["GANGLIA_SERVER-UPGRADE"]
-  },
-  "_comment" : "GLUSTERFS-specific dependencies",
-  "optional_glusterfs": {
-    "HBASE_MASTER-START": ["PEERSTATUS-START"],
-    "JOBTRACKER-START": ["PEERSTATUS-START"],
-    "TASKTRACKER-START": ["PEERSTATUS-START"],
-    "GLUSTERFS_SERVICE_CHECK-SERVICE_CHECK": ["PEERSTATUS-START"],
-    "JOBTRACKER-UPGRADE": ["GLUSTERFS_CLIENT-UPGRADE"]
-  },
-  "_comment" : "Dependencies that are used when GLUSTERFS is not present in cluster",
-  "optional_no_glusterfs": {
-    "SECONDARY_NAMENODE-START": ["NAMENODE-START"],
-    "RESOURCEMANAGER-START": ["NAMENODE-START", "DATANODE-START"],
-    "NODEMANAGER-START": ["NAMENODE-START", "DATANODE-START", "RESOURCEMANAGER-START"],
-    "HISTORYSERVER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HBASE_MASTER-START": ["NAMENODE-START", "DATANODE-START"],
-    "JOBTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
-    "TASKTRACKER-START": ["NAMENODE-START", "DATANODE-START"],
-    "HIVE_SERVER-START": ["DATANODE-START"],
-    "WEBHCAT_SERVER-START": ["DATANODE-START"],
-    "NAGIOS_SERVER-START": ["NAMENODE-START", "SECONDARY_NAMENODE-START",
-        "DATANODE-START", "RESOURCEMANAGER-START", "NODEMANAGER-START", "HISTORYSERVER-START"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["NAMENODE-START", "DATANODE-START",
-        "SECONDARY_NAMENODE-START"],
-    "MAPREDUCE2_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START",
-        "RESOURCEMANAGER-START", "HISTORYSERVER-START", "YARN_SERVICE_CHECK-SERVICE_CHECK"],
-    "YARN_SERVICE_CHECK-SERVICE_CHECK": ["NODEMANAGER-START", "RESOURCEMANAGER-START"],
-    "RESOURCEMANAGER_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START"],
-    "PIG_SERVICE_CHECK-SERVICE_CHECK": ["RESOURCEMANAGER-START", "NODEMANAGER-START"],
-    "NAMENODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
-        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "DATANODE-STOP": ["JOBTRACKER-STOP", "TASKTRACKER-STOP", "RESOURCEMANAGER-STOP",
-        "NODEMANAGER-STOP", "HISTORYSERVER-STOP", "HBASE_MASTER-STOP"],
-    "SECONDARY_NAMENODE-UPGRADE": ["NAMENODE-UPGRADE"],
-    "DATANODE-UPGRADE": ["SECONDARY_NAMENODE-UPGRADE"],
-    "HDFS_CLIENT-UPGRADE": ["DATANODE-UPGRADE"],
-    "JOBTRACKER-UPGRADE": ["HDFS_CLIENT-UPGRADE"]
-  },
-  "_comment" : "Dependencies that are used in HA NameNode cluster",
-  "optional_ha": {
-    "NAMENODE-START": ["JOURNALNODE-START", "ZOOKEEPER_SERVER-START"],
-    "ZKFC-START": ["NAMENODE-START"],
-    "NAGIOS_SERVER-START": ["ZKFC-START", "JOURNALNODE-START"],
-    "HDFS_SERVICE_CHECK-SERVICE_CHECK": ["ZKFC-START"]
-  }
-}
-
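
Reading these records as described by the format comment at the top of the file: the key names the blocked role-command and the value lists the role-commands that must complete first. For example,

  "SUPERVISOR-START" : ["NIMBUS-START"]

means a Storm supervisor is not started until Nimbus has started, and the *_SERVICE_CHECK-SERVICE_CHECK entries apply the same rule to the smoke tests.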

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/global.xml
deleted file mode 100644
index 1d56238..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/global.xml
+++ /dev/null
@@ -1,42 +0,0 @@
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>falcon_user</name>
-    <value>falcon</value>
-    <description>Falcon user.</description>
-  </property>
-  <property>
-    <name>falcon_port</name>
-    <value>15000</value>
-    <description>Falcon server port.</description>
-  </property>
-  <property>
-    <name>falcon_local_dir</name>
-    <value>/hadoop/falcon</value>
-    <description>Falcon directory to store different data.</description>
-  </property>
-  <property>
-    <name>falcon_store_uri</name>
-    <value>file:///hadoop/falcon/store</value>
-    <description>Falcon store URI.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/oozie-site.xml
deleted file mode 100644
index 186677b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/configuration/oozie-site.xml
+++ /dev/null
@@ -1,145 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->
-
-<configuration>
-  <property>
-    <name>oozie.service.ProxyUserService.proxyuser.falcon.hosts</name>
-    <value>*</value>
-    <description>Falcon proxyuser hosts</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ProxyUserService.proxyuser.falcon.groups</name>
-    <value>*</value>
-    <description>Falcon proxyuser groups</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-instances</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo
-    </value>
-    <description>Falcon</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create-inst</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now_inst,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today_inst,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday_inst,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth_inst,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth_inst,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear_inst,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear_inst,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>Falcon</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-create</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph2_coord_latest_echo,
-      future=org.apache.oozie.coord.CoordELFunctions#ph2_coord_future_echo,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>Falcon</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-job-submit-data</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph1_now_echo,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph1_today_echo,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph1_yesterday_echo,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_currentMonth_echo,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph1_lastMonth_echo,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph1_currentYear_echo,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph1_lastYear_echo,
-      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph1_dataIn_echo,
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_wrap,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_formatTime_echo,
-      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph1_coord_dateOffset_echo,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>Falcon</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-action-start</name>
-    <value>
-      now=org.apache.oozie.extensions.OozieELExtensions#ph2_now,
-      today=org.apache.oozie.extensions.OozieELExtensions#ph2_today,
-      yesterday=org.apache.oozie.extensions.OozieELExtensions#ph2_yesterday,
-      currentMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_currentMonth,
-      lastMonth=org.apache.oozie.extensions.OozieELExtensions#ph2_lastMonth,
-      currentYear=org.apache.oozie.extensions.OozieELExtensions#ph2_currentYear,
-      lastYear=org.apache.oozie.extensions.OozieELExtensions#ph2_lastYear,
-      latest=org.apache.oozie.coord.CoordELFunctions#ph3_coord_latest,
-      future=org.apache.oozie.coord.CoordELFunctions#ph3_coord_future,
-      dataIn=org.apache.oozie.extensions.OozieELExtensions#ph3_dataIn,
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_nominalTime,
-      dateOffset=org.apache.oozie.coord.CoordELFunctions#ph3_coord_dateOffset,
-      formatTime=org.apache.oozie.coord.CoordELFunctions#ph3_coord_formatTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>Falcon</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-submit</name>
-    <value>
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph1_coord_nominalTime_echo_fixed,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>Falcon</description>
-  </property>
-
-  <property>
-    <name>oozie.service.ELService.ext.functions.coord-sla-create</name>
-    <value>
-      instanceTime=org.apache.oozie.coord.CoordELFunctions#ph2_coord_nominalTime,
-      user=org.apache.oozie.coord.CoordELFunctions#coord_user
-    </value>
-    <description>Falcon</description>
-  </property>
-
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/metainfo.xml
deleted file mode 100644
index 4790cac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/metainfo.xml
+++ /dev/null
@@ -1,72 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>FALCON</name>
-      <comment>Data management and processing platform</comment>
-      <version>0.4.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>FALCON_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/falcon_client.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-        <component>
-          <name>FALCON_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/falcon_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-
-      <!--TODO: uncomment this after package will be available in repo-->
-      <!--<osSpecifics>-->
-        <!--<osSpecific>-->
-          <!--<osType>any</osType>-->
-          <!--<packages>-->
-            <!--<package>-->
-              <!--<type>rpm</type>-->
-              <!--<name>falcon-0.4.0.2.0.6.0-76</name>-->
-            <!--</package>-->
-          <!--</packages>-->
-        <!--</osSpecific>-->
-      <!--</osSpecifics>-->
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>oozie-site</config-type>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon.py
deleted file mode 100644
index d6c3d1d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon.py
+++ /dev/null
@@ -1,66 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def falcon(type, action = None):
-  import params
-
-  #TODO remove after package will be available in repo
-  Execute("cd /tmp; rm -f falcon-0.4.0.2.0.6.0-76.el6.noarch.rpm; "
-          "wget http://public-repo-1.hortonworks.com/HDP-LABS/Projects/Falcon/2.0.6.0-76/rpm/falcon-0.4.0.2.0.6.0-76.el6.noarch.rpm; "
-          "rpm -Uvh --nodeps falcon-0.4.0.2.0.6.0-76.el6.noarch.rpm",
-          not_if='yum list installed | grep falcon'
-  )
-
-  if type == 'client':
-    if action == 'config':
-      File(params.falcon_conf_dir + '/client.properties',
-           content=Template('client.properties.j2'),
-           mode=0644)
-  elif type == 'server':
-    if action == 'config':
-      Directory(params.falcon_local_dir,
-                owner=params.falcon_user,
-                recursive=True
-      )
-      Directory(params.falcon_data_dir,
-                owner=params.falcon_user,
-                recursive=True
-      )
-      File(params.falcon_conf_dir + '/runtime.properties',
-           content=Template('runtime.properties.j2'),
-           mode=0644
-      )
-      File(params.falcon_conf_dir + '/startup.properties',
-           content=Template('startup.properties.j2'),
-           mode=0644
-      )
-    if action == 'start':
-      Execute(format('env JAVA_HOME={java_home} FALCON_LOG_DIR=/var/log/falcon '
-                     'FALCON_PID_DIR=/var/run/falcon FALCON_DATA_DIR={falcon_data_dir} '
-                     '{falcon_home}/bin/falcon-start -port {falcon_port}'),
-              user=params.falcon_user
-      )
-    if action == 'stop':
-      Execute(format('env JAVA_HOME={java_home} FALCON_LOG_DIR=/var/log/falcon '
-                     'FALCON_PID_DIR=/var/run/falcon FALCON_DATA_DIR={falcon_data_dir} '
-                     '{falcon_home}/bin/falcon-stop'),
-              user=params.falcon_user
-      )
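
Using the defaults defined elsewhere in this patch (falcon_home /usr/lib/falcon, falcon_local_dir /hadoop/falcon and hence falcon_data_dir /hadoop/falcon/activemq, falcon_port 15000) and leaving the cluster-specific java_home as a placeholder, the start action expands to roughly the following command, run as the falcon user:

  env JAVA_HOME=<java_home> FALCON_LOG_DIR=/var/log/falcon FALCON_PID_DIR=/var/run/falcon FALCON_DATA_DIR=/hadoop/falcon/activemq /usr/lib/falcon/bin/falcon-start -port 15000

The stop action is identical except that it calls falcon-stop and takes no port argument.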

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_client.py
deleted file mode 100644
index ab59b6f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_client.py
+++ /dev/null
@@ -1,40 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from falcon import falcon
-
-class FalconClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def config(self, env):
-    import params
-
-    falcon('client', action='config')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  FalconClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_server.py
deleted file mode 100644
index 9415694..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/falcon_server.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from falcon import falcon
-
-class FalconServer(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    falcon('server', action='start')
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    falcon('server', action='stop')
-
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-
-    falcon('server', action='config')
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.falcon_pid_dir)
-
-
-if __name__ == "__main__":
-  FalconServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/params.py
deleted file mode 100644
index 7f27862..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/params.py
+++ /dev/null
@@ -1,36 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-oozie_user = config['configurations']['global']['oozie_user']
-falcon_user = config['configurations']['global']['falcon_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-
-java_home = config['hostLevelParams']['java_home']
-falcon_home = '/usr/lib/falcon'
-falcon_conf_dir = '/etc/falcon/conf'
-falcon_local_dir = config['configurations']['global']['falcon_local_dir']
-falcon_log_dir = '/var/log/falcon'
-falcon_data_dir = format('{falcon_local_dir}/activemq')
-store_uri = config['configurations']['global']['falcon_store_uri']
-falcon_host = config['clusterHostInfo']['falcon_server_hosts'][0]
-falcon_port = config['configurations']['global']['falcon_port']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/service_check.py
deleted file mode 100644
index 19fbaf4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/service_check.py
+++ /dev/null
@@ -1,38 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class FalconServiceCheck(Script):
-
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-
-    Execute(format("env JAVA_HOME={java_home} FALCON_LOG_DIR=/var/log/falcon "
-                   "FALCON_PID_DIR=/var/run/falcon FALCON_DATA_DIR={falcon_data_dir} "
-                   "{falcon_home}/bin/falcon admin -version"),
-            user=params.smoke_user,
-            logoutput=True
-    )
-
-if __name__ == "__main__":
-  FalconServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/status_params.py
deleted file mode 100644
index 395766c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/scripts/status_params.py
+++ /dev/null
@@ -1,24 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-falcon_pid_dir = '/var/run/falcon'
-server_pid_file = format('{falcon_pid_dir}/falcon.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/client.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/client.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/client.properties.j2
deleted file mode 100644
index 6ffc110..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/client.properties.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-#########################################################################
-##########    This is used for falcon packaging only. ###################
-## Uses default port. Please change if configured for non-default port ##
-#########################################################################
-
-falcon.url=http://{{falcon_host}}:{{falcon_port}}/
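
Rendered with the default falcon_port of 15000 from the FALCON global.xml in this patch and a hypothetical server host, the template yields a single property, for example:

  falcon.url=http://falcon.example.com:15000/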

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/runtime.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/runtime.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/runtime.properties.j2
deleted file mode 100644
index 677e5e3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/runtime.properties.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-####################################################
-####    This is used for falcon packaging only. ####
-####################################################
-
-*.domain=${falcon.app.type}
-
-*.log.cleanup.frequency.minutes.retention=hours(6)
-*.log.cleanup.frequency.hours.retention=minutes(1)
-*.log.cleanup.frequency.days.retention=days(7)
-*.log.cleanup.frequency.months.retention=months(3)
-
-#### To configure falcon servers with prism ####
-#*.all.colos=<comma separated list of colos where falcon servers are installed>
-#*.falcon.<colo>.endpoint=<falcon server endpoint>
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/startup.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/startup.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/startup.properties.j2
deleted file mode 100644
index ade21c9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/FALCON/package/templates/startup.properties.j2
+++ /dev/null
@@ -1,70 +0,0 @@
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-
-####################################################
-####    This is used for falcon packaging only. ####
-####################################################
-
-*.domain=${falcon.app.type}
-
-######### Implementation classes #########
-## DONT MODIFY UNLESS SURE ABOUT CHANGE ##
-*.workflow.engine.impl=org.apache.falcon.workflow.engine.OozieWorkflowEngine
-*.oozie.process.workflow.builder=org.apache.falcon.workflow.OozieProcessWorkflowBuilder
-*.oozie.feed.workflow.builder=org.apache.falcon.workflow.OozieFeedWorkflowBuilder
-*.journal.impl=org.apache.falcon.transaction.SharedFileSystemJournal
-*.SchedulableEntityManager.impl=org.apache.falcon.resource.SchedulableEntityManager
-*.ConfigSyncService.impl=org.apache.falcon.resource.ConfigSyncService
-*.ProcessInstanceManager.impl=org.apache.falcon.resource.InstanceManager
-*.catalog.service.impl=org.apache.falcon.catalog.HiveCatalogService
-
-*.application.services=org.apache.falcon.entity.store.ConfigurationStore,\
-                        org.apache.falcon.service.ProcessSubscriberService,\
-                        org.apache.falcon.rerun.service.RetryService,\
-						org.apache.falcon.rerun.service.LateRunService,\
-						org.apache.falcon.service.SLAMonitoringService,\
-						org.apache.falcon.service.LogCleanupService
-prism.application.services=org.apache.falcon.entity.store.ConfigurationStore
-*.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
-                        org.apache.falcon.entity.ColoClusterRelation,\
-                        org.apache.falcon.group.FeedGroupMap,\
-                        org.apache.falcon.service.SharedLibraryHostingService
-prism.configstore.listeners=org.apache.falcon.entity.v0.EntityGraph,\
-                        org.apache.falcon.entity.ColoClusterRelation,\
-                        org.apache.falcon.group.FeedGroupMap
-*.broker.impl.class=org.apache.activemq.ActiveMQConnectionFactory
-*.shared.libs=activemq-core,ant,geronimo-j2ee-management,hadoop-distcp,jms,json-simple,oozie-client,spring-jms,s4fs-0.1.jar
-
-######### Implementation classes #########
-
-*.config.store.uri={{store_uri}}
-*.system.lib.location=${falcon.home}/server/webapp/falcon/WEB-INF/lib
-prism.system.lib.location=${falcon.home}/server/webapp/prism/WEB-INF/lib
-*.broker.url=tcp://localhost:61616
-*.retry.recorder.path=${falcon.log.dir}/retry
-
-*.falcon.cleanup.service.frequency=days(1)
-
-#default time-to-live for a JMS message 3 days (time in minutes)
-*.broker.ttlInMins=4320
-*.entity.topic=FALCON.ENTITY.TOPIC
-*.max.retry.failure.count=1
-
-######### Properties for configuring iMon client and metric #########
-*.internal.queue.size=1000
-*.current.colo=default

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/metainfo.xml
deleted file mode 100644
index ad17c06..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/metainfo.xml
+++ /dev/null
@@ -1,101 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>GANGLIA</name>
-      <comment>Ganglia Metrics Collection system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-          <name>GANGLIA_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/ganglia_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>GANGLIA_MONITOR</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/ganglia_monitor.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>libganglia-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmetad-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-web-3.5.7-99.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>python-rrdtool.x86_64</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>ganglia-gmond-modules-python-3.5.0-99</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <package>
-            <type>rpm</type>
-            <name>apache2</name>
-          </package>
-          <package>
-            <type>rpm</type>
-            <name>apache2-mod_php5</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <package>
-            <type>rpm</type>
-            <name>httpd</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmetad.sh
deleted file mode 100644
index e60eb31..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmetad.sh
+++ /dev/null
@@ -1,37 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# Before checking gmetad, check rrdcached.
-./checkRrdcached.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-if [ -n "${gmetadRunningPid}" ]
-then
-  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
-else
-  echo "Failed to find running ${GMETAD_BIN}";
-  exit 1;
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmond.sh
deleted file mode 100644
index 0cec8dc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkGmond.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function checkGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-    # Skip over (purported) Clusters that don't have their core conf file present.
-    if [ -e "${gmondCoreConfFileName}" ]
-    then 
-      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-      if [ -n "${gmondRunningPid}" ]
-      then
-        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
-      else
-        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
-        exit 1;
-      fi
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so check
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        checkGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just check the one ${gmondClusterName} that was asked for.
-    checkGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkRrdcached.sh
deleted file mode 100644
index d94db5d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/checkRrdcached.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-if [ -n "${rrdcachedRunningPid}" ]
-then
-  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
-else
-  echo "Failed to find running ${RRDCACHED_BIN}";
-  exit 1;
-fi

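The check scripts all share the same liveness test: read the PID recorded when the daemon started, then confirm that a process with that PID is still alive via ps. A condensed sketch of the pattern, using the rrdcached variable names from rrdcachedLib.sh (which appears later in this commit):

    pid=`cat "${RRDCACHED_PID_FILE}" 2>/dev/null`
    if [ -n "${pid}" ] && ps -p "${pid}" >/dev/null 2>&1
    then
        echo "${RRDCACHED_BIN} running with PID ${pid}"
    else
        echo "Failed to find running ${RRDCACHED_BIN}"
        exit 1
    fi

The originals reach the same result with a longer ps -o pid= pipeline; ps -p is shown here only as the shorter equivalent.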
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetad.init
deleted file mode 100644
index 20b388e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetad.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmetad startup script
-# processname: hdp-gmetad
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
-HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
-HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmetad..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmetad..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmetad..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMETAD_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

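The init script above follows the usual SysV layout (chkconfig header, /var/lock/subsys lock file), so on a RHEL-style host it would typically be registered and driven as follows; this is a usage sketch, not part of the change itself:

    chkconfig --add hdp-gmetad
    service hdp-gmetad start
    service hdp-gmetad status
    service hdp-gmetad stop

The status action simply delegates to checkGmetad.sh, so its exit code reflects whether gmetad, and transitively rrdcached, is actually running.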
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetadLib.sh
deleted file mode 100644
index e28610e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmetadLib.sh
+++ /dev/null
@@ -1,204 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMETAD_BIN=/usr/sbin/gmetad;
-GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
-GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
-
-function getGmetadLoggedPid()
-{
-    if [ -e "${GMETAD_PID_FILE}" ]
-    then
-        echo `cat ${GMETAD_PID_FILE}`;
-    fi
-}
-
-function getGmetadRunningPid()
-{
-    gmetadLoggedPid=`getGmetadLoggedPid`;
-
-    if [ -n "${gmetadLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmetadConf()
-{
-    now=`date`;
-
-    cat <<END_OF_GMETAD_CONF_1
-#################### Generated by ${0} on ${now} ####################
-#
-#-------------------------------------------------------------------------------
-# Setting the debug_level to 1 will keep the daemon in the foreground and
-# show only error messages. Setting this value higher than 1 will make 
-# gmetad output debugging information and stay in the foreground.
-# default: 0
-# debug_level 10
-#
-#-------------------------------------------------------------------------------
-# What to monitor. The most important section of this file. 
-#
-# The data_source tag specifies either a cluster or a grid to
-# monitor. If we detect the source is a cluster, we will maintain a complete
-# set of RRD databases for it, which can be used to create historical 
-# graphs of the metrics. If the source is a grid (it comes from another gmetad),
-# we will only maintain summary RRDs for it.
-#
-# Format: 
-# data_source "my cluster" [polling interval] address1:port addreses2:port ...
-# 
-# The keyword 'data_source' must immediately be followed by a unique
-# string which identifies the source, then an optional polling interval in 
-# seconds. The source will be polled at this interval on average. 
-# If the polling interval is omitted, 15sec is assumed.
-#
-# If you choose to set the polling interval to something other than the default,
-# note that the web frontend determines a host as down if its TN value is less
-# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
-# to something around or greater than 80sec, this will cause the frontend to
-# incorrectly display hosts as down even though they are not.
-#
-# A list of machines which service the data source follows, in the 
-# format ip:port, or name:port. If a port is not specified then 8649
-# (the default gmond port) is assumed.
-# default: There is no default value
-#
-# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
-# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
-# data_source "another source" 1.3.4.7:8655  1.3.4.8
-END_OF_GMETAD_CONF_1
-
-    # Get info about all the configured Ganglia clusters.
-    getGangliaClusterInfo | while read gangliaClusterInfoLine
-    do
-        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
-        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
-        # ...and generate a corresponding data_source line for gmetad.conf. 
-        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
-    done
-
-    cat <<END_OF_GMETAD_CONF_2
-#
-# Round-Robin Archives
-# You can specify custom Round-Robin archives here (defaults are listed below)
-#
-# Old Default RRA: Keep 1 hour of metrics at 15 second resolution. 1 day at 6 minute
-# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-#      "RRA:AVERAGE:0.5:5760:374"
-# New Default RRA
-# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
-# Two weeks of data points at 1 minute resolution (average)
-#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
-# Retaining existing resolution
-RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
-     "RRA:AVERAGE:0.5:5760:374"
-#
-#-------------------------------------------------------------------------------
-# Scalability mode. If on, we summarize over downstream grids, and respect
-# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
-# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
-# we are the "authority" on data source feeds. This approach does not scale to
-# large groups of clusters, but is provided for backwards compatibility.
-# default: on
-# scalable off
-#
-#-------------------------------------------------------------------------------
-# The name of this Grid. All the data sources above will be wrapped in a GRID
-# tag with this name.
-# default: unspecified
-gridname "HDP_GRID"
-#
-#-------------------------------------------------------------------------------
-# The authority URL for this grid. Used by other gmetads to locate graphs
-# for our data sources. Generally points to a ganglia/
-# website on this machine.
-# default: "http://hostname/ganglia/",
-#   where hostname is the name of this machine, as defined by gethostname().
-# authority "http://mycluster.org/newprefix/"
-#
-#-------------------------------------------------------------------------------
-# List of machines this gmetad will share XML with. Localhost
-# is always trusted. 
-# default: There is no default value
-# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
-#
-#-------------------------------------------------------------------------------
-# If you want any host which connects to the gmetad XML to receive
-# data, then set this value to "on"
-# default: off
-# all_trusted on
-#
-#-------------------------------------------------------------------------------
-# If you don't want gmetad to setuid then set this to off
-# default: on
-# setuid off
-#
-#-------------------------------------------------------------------------------
-# User gmetad will setuid to (defaults to "nobody")
-# default: "nobody"
-setuid_username "${GMETAD_USER}"
-#
-#-------------------------------------------------------------------------------
-# Umask to apply to created rrd files and grid directory structure
-# default: 0 (files are public)
-# umask 022
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer requests for XML
-# default: 8651
-# xml_port 8651
-#
-#-------------------------------------------------------------------------------
-# The port gmetad will answer queries for XML. This facility allows
-# simple subtree and summation views of the XML tree.
-# default: 8652
-# interactive_port 8652
-#
-#-------------------------------------------------------------------------------
-# The number of threads answering XML requests
-# default: 4
-# server_threads 10
-#
-#-------------------------------------------------------------------------------
-# Where gmetad stores its round-robin databases
-# default: "/var/lib/ganglia/rrds"
-# rrd_rootdir "/some/other/place"
-#
-#-------------------------------------------------------------------------------
-# In earlier versions of gmetad, hostnames were handled in a case
-# sensitive manner
-# If your hostname directories have been renamed to lower case,
-# set this option to 0 to disable backward compatibility.
-# From version 3.2, backwards compatibility will be disabled by default.
-# default: 1   (for gmetad < 3.2)
-# default: 0   (for gmetad >= 3.2)
-case_sensitive_hostnames 1
-END_OF_GMETAD_CONF_2
-}

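generateGmetadConf emits one data_source line per cluster reported by getGangliaClusterInfo, in the form <cluster name> <gmond master IP>:<port>. With a hypothetical pair of clusters the generated section would look like:

    data_source "HDPNameNode" 10.0.0.11:8661
    data_source "HDPSlaves" 10.0.0.11:8660

No polling interval is written, so gmetad falls back to the 15 second default mentioned in the comment block above.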
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmond.init
deleted file mode 100644
index afb7026..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmond.init
+++ /dev/null
@@ -1,73 +0,0 @@
-#!/bin/sh
-# chkconfig: 2345 70 40
-# description: hdp-gmond startup script
-# processname: hdp-gmond
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Remember to keep this in-sync with the definition of 
-# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
-HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
-HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
-HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
-HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
-
-RETVAL=0
-
-case "$1" in
-   start)
-      echo "============================="
-      echo "Starting hdp-gmond..."
-      echo "============================="
-      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STARTER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
-      ;;
-
-  stop)
-      echo "=================================="
-      echo "Shutting down hdp-gmond..."
-      echo "=================================="
-      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_STOPPER}"
-      RETVAL=$?
-      echo
-      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
-      ;;
-
-  restart|reload)
-   	$0 stop
-   	$0 start
-   	RETVAL=$?
-	;;
-  status)
-      echo "======================================="
-      echo "Checking status of hdp-gmond..."
-      echo "======================================="
-      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
-      eval "${HDP_GANLIA_GMOND_CHECKER}"
-      RETVAL=$?
-      ;;
-  *)
-	echo "Usage: $0 {start|stop|restart|status}"
-	exit 1
-esac
-
-exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmondLib.sh
deleted file mode 100644
index 87da4dd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/gmondLib.sh
+++ /dev/null
@@ -1,545 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-GMOND_BIN=/usr/sbin/gmond;
-GMOND_CORE_CONF_FILE=gmond.core.conf;
-GMOND_MASTER_CONF_FILE=gmond.master.conf;
-GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
-GMOND_PID_FILE=gmond.pid;
-
-# Functions.
-function getGmondCoreConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
-    fi
-}
-
-function getGmondMasterConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
-    fi
-}
-
-function getGmondSlaveConfFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    else
-        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
-    fi
-}
-
-function getGmondPidFileName()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # ${clusterName} is not empty. 
-        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
-    else
-        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
-    fi
-}
-
-function getGmondLoggedPid()
-{
-    gmondPidFile=`getGmondPidFileName ${1}`;
-
-    if [ -e "${gmondPidFile}" ]
-    then
-        echo `cat ${gmondPidFile}`;
-    fi
-}
-
-function getGmondRunningPid()
-{
-    gmondLoggedPid=`getGmondLoggedPid ${1}`;
-
-    if [ -n "${gmondLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}
-
-function generateGmondCoreConf()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_CORE_CONF
-#################### Generated by ${0} on ${now} ####################
-#
-/* This configuration is as close to 2.5.x default behavior as possible
-   The values closely match ./gmond/metric.h definitions in 2.5.x */
-globals {
-  daemonize = yes
-  setuid = yes
-  user = ${GMOND_USER}
-  debug_level = 0
-  max_udp_msg_len = 1472
-  mute = no
-  deaf = no 
-  allow_extra_data = yes
-  host_dmax = 0 /*secs */
-  host_tmax = 20 /*secs */
-  cleanup_threshold = 300 /*secs */
-  gexec = no
-  send_metadata_interval = 30 /*secs */
-}
-
-/*
- * The cluster attributes specified will be used as part of the <CLUSTER>
- * tag that will wrap all hosts collected by this instance.
- */
-cluster {
-  name = "${gmondClusterName}"
-  owner = "unspecified"
-  latlong = "unspecified"
-  url = "unspecified"
-}
-
-/* The host section describes attributes of the host, like the location */
-host {
-  location = "unspecified"
-}
-
-/* You can specify as many tcp_accept_channels as you like to share
- * an XML description of the state of the cluster.
- *
- * At the very least, every gmond must expose its XML state to 
- * queriers from localhost.
- */
-tcp_accept_channel {
-  bind = localhost
-  port = ${gmondPort}
-}
-
-/* Each metrics module that is referenced by gmond must be specified and
-   loaded. If the module has been statically linked with gmond, it does
-   not require a load path. However all dynamically loadable modules must
-   include a load path. */
-modules {
-  module {
-    name = "core_metrics"
-  }
-  module {
-    name = "cpu_module"
-    path = "modcpu.so"
-  }
-  module {
-    name = "disk_module"
-    path = "moddisk.so"
-  }
-  module {
-    name = "load_module"
-    path = "modload.so"
-  }
-  module {
-    name = "mem_module"
-    path = "modmem.so"
-  }
-  module {
-    name = "net_module"
-    path = "modnet.so"
-  }
-  module {
-    name = "proc_module"
-    path = "modproc.so"
-  }
-  module {
-    name = "sys_module"
-    path = "modsys.so"
-  }
-}
-
-/* The old internal 2.5.x metric array has been replaced by the following
-   collection_group directives.  What follows is the default behavior for
-   collecting and sending metrics that is as close to 2.5.x behavior as
-   possible. */
-
-/* This collection group will cause a heartbeat (or beacon) to be sent every
-   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
-   the age of the running gmond. */
-collection_group {
-  collect_once = yes
-  time_threshold = 20
-  metric {
-    name = "heartbeat"
-  }
-}
-
-/* This collection group will send general info about this host total memory every
-   180 secs.
-   This information doesn't change between reboots and is only collected
-   once. This information is needed for the heatmap display. */
- collection_group {
-   collect_once = yes
-   time_threshold = 180
-   metric {
-    name = "mem_total"
-    title = "Memory Total"
-   }
- }
-
-/* This collection group will send general info about this host every
-   1200 secs.
-   This information doesn't change between reboots and is only collected
-   once. */
-collection_group {
-  collect_once = yes
-  time_threshold = 1200
-  metric {
-    name = "cpu_num"
-    title = "CPU Count"
-  }
-  metric {
-    name = "cpu_speed"
-    title = "CPU Speed"
-  }
-  /* Should this be here? Swap can be added/removed between reboots. */
-  metric {
-    name = "swap_total"
-    title = "Swap Space Total"
-  }
-  metric {
-    name = "boottime"
-    title = "Last Boot Time"
-  }
-  metric {
-    name = "machine_type"
-    title = "Machine Type"
-  }
-  metric {
-    name = "os_name"
-    title = "Operating System"
-  }
-  metric {
-    name = "os_release"
-    title = "Operating System Release"
-  }
-  metric {
-    name = "location"
-    title = "Location"
-  }
-}
-
-/* This collection group will send the status of gexecd for this host
-   every 300 secs.*/
-/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
-collection_group {
-  collect_once = yes
-  time_threshold = 300
-  metric {
-    name = "gexec"
-    title = "Gexec Status"
-  }
-}
-
-/* This collection group will collect the CPU status info every 20 secs.
-   The time threshold is set to 90 seconds.  In honesty, this
-   time_threshold could be set significantly higher to reduce
-   unnecessary network chatter. */
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* CPU status */
-  metric {
-    name = "cpu_user"
-    value_threshold = "1.0"
-    title = "CPU User"
-  }
-  metric {
-    name = "cpu_system"
-    value_threshold = "1.0"
-    title = "CPU System"
-  }
-  metric {
-    name = "cpu_idle"
-    value_threshold = "5.0"
-    title = "CPU Idle"
-  }
-  metric {
-    name = "cpu_nice"
-    value_threshold = "1.0"
-    title = "CPU Nice"
-  }
-  metric {
-    name = "cpu_aidle"
-    value_threshold = "5.0"
-    title = "CPU aidle"
-  }
-  metric {
-    name = "cpu_wio"
-    value_threshold = "1.0"
-    title = "CPU wio"
-  }
-  /* The next two metrics are optional if you want more detail...
-     ... since they are accounted for in cpu_system.
-  metric {
-    name = "cpu_intr"
-    value_threshold = "1.0"
-    title = "CPU intr"
-  }
-  metric {
-    name = "cpu_sintr"
-    value_threshold = "1.0"
-    title = "CPU sintr"
-  }
-  */
-}
-
-collection_group {
-  collect_every = 20
-  time_threshold = 90
-  /* Load Averages */
-  metric {
-    name = "load_one"
-    value_threshold = "1.0"
-    title = "One Minute Load Average"
-  }
-  metric {
-    name = "load_five"
-    value_threshold = "1.0"
-    title = "Five Minute Load Average"
-  }
-  metric {
-    name = "load_fifteen"
-    value_threshold = "1.0"
-    title = "Fifteen Minute Load Average"
-  }
-}
-
-/* This group collects the number of running and total processes */
-collection_group {
-  collect_every = 80
-  time_threshold = 950
-  metric {
-    name = "proc_run"
-    value_threshold = "1.0"
-    title = "Total Running Processes"
-  }
-  metric {
-    name = "proc_total"
-    value_threshold = "1.0"
-    title = "Total Processes"
-  }
-}
-
-/* This collection group grabs the volatile memory metrics every 40 secs and
-   sends them at least every 180 secs.  This time_threshold can be increased
-   significantly to reduce unneeded network traffic. */
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "mem_free"
-    value_threshold = "1024.0"
-    title = "Free Memory"
-  }
-  metric {
-    name = "mem_shared"
-    value_threshold = "1024.0"
-    title = "Shared Memory"
-  }
-  metric {
-    name = "mem_buffers"
-    value_threshold = "1024.0"
-    title = "Memory Buffers"
-  }
-  metric {
-    name = "mem_cached"
-    value_threshold = "1024.0"
-    title = "Cached Memory"
-  }
-  metric {
-    name = "swap_free"
-    value_threshold = "1024.0"
-    title = "Free Swap Space"
-  }
-}
-
-collection_group {
-  collect_every = 40
-  time_threshold = 300
-  metric {
-    name = "bytes_out"
-    value_threshold = 4096
-    title = "Bytes Sent"
-  }
-  metric {
-    name = "bytes_in"
-    value_threshold = 4096
-    title = "Bytes Received"
-  }
-  metric {
-    name = "pkts_in"
-    value_threshold = 256
-    title = "Packets Received"
-  }
-  metric {
-    name = "pkts_out"
-    value_threshold = 256
-    title = "Packets Sent"
-  }
-}
-
-
-collection_group {
-  collect_every = 40
-  time_threshold = 180
-  metric {
-    name = "disk_free"
-    value_threshold = 1.0
-    title = "Disk Space Available"
-  }
-  metric {
-    name = "part_max_used"
-    value_threshold = 1.0
-    title = "Maximum Disk Space Used"
-  }
-  metric {
-    name = "disk_total"
-    value_threshold = 1.0
-    title = "Total Disk Space"
-  }
-}
-
-udp_recv_channel {
-    port = 0
-}
-
-
-include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
-END_OF_GMOND_CORE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondMasterConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_MASTER_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Masters only receive; they never send. */
-udp_recv_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-
-/* The gmond cluster master must additionally provide an XML 
- * description of the cluster to the gmetad that will query it.
- */
-tcp_accept_channel {
-  bind = ${gmondMasterIP}
-  port = ${gmondPort}
-}
-END_OF_GMOND_MASTER_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}
-
-function generateGmondSlaveConf
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
-
-        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
-        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
-        then
-            now=`date`;
-
-            cat << END_OF_GMOND_SLAVE_CONF
-#################### Generated by ${0} on ${now} ####################
-/* Slaves only send; they never receive. */
-udp_send_channel {
-  #bind_hostname = yes # Highly recommended, soon to be default.
-                       # This option tells gmond to use a source address
-                       # that resolves to the machine's hostname.  Without
-                       # this, the metrics may appear to come from any
-                       # interface and the DNS names associated with
-                       # those IPs will be used to create the RRDs.
-  host = ${gmondMasterIP}
-  port = ${gmondPort}
-  ttl = 1
-}
-END_OF_GMOND_SLAVE_CONF
-        else
-            return 2;
-        fi
-    else
-        return 1;
-    fi
-}

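gmondLib.sh resolves every path per cluster. Assuming, purely for illustration, that gangliaLib.sh sets GANGLIA_CONF_DIR=/etc/ganglia/hdp and GANGLIA_RUNTIME_DIR=/var/run/ganglia/hdp (both are defined outside this hunk), a cluster named HDPSlaves would use:

    /etc/ganglia/hdp/HDPSlaves/gmond.core.conf            # generateGmondCoreConf output
    /etc/ganglia/hdp/HDPSlaves/conf.d/gmond.master.conf   # receive/accept channels on the master
    /etc/ganglia/hdp/HDPSlaves/conf.d/gmond.slave.conf    # udp_send_channel on all other nodes
    /var/run/ganglia/hdp/HDPSlaves/gmond.pid

When no cluster name is passed, each getGmond*FileName function falls back to the same file names directly under the conf and runtime directories.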
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrd.py
deleted file mode 100644
index 3fe6901..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrd.py
+++ /dev/null
@@ -1,213 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import cgi
-import os
-import rrdtool
-import sys
-import time
-import re
-import urlparse
-
-# place this script in /var/www/cgi-bin of the Ganglia collector
-# requires 'yum install rrdtool-python' on the Ganglia collector
-
-
-def printMetric(clusterName, hostName, metricName, file, cf, start, end,
-                resolution, pointInTime):
-  if clusterName.endswith("rrds"):
-    clusterName = ""
-
-  args = [file, cf]
-
-  if start is not None:
-    args.extend(["-s", start])
-
-  if end is not None:
-    args.extend(["-e", end])
-
-  if resolution is not None:
-    args.extend(["-r", resolution])
-
-  rrdMetric = rrdtool.fetch(args)
-  # ds_name
-  sys.stdout.write(rrdMetric[1][0])
-  sys.stdout.write("\n")
-
-  sys.stdout.write(clusterName)
-  sys.stdout.write("\n")
-  sys.stdout.write(hostName)
-  sys.stdout.write("\n")
-  sys.stdout.write(metricName)
-  sys.stdout.write("\n")
-
-  # write time
-  sys.stdout.write(str(rrdMetric[0][0]))
-  sys.stdout.write("\n")
-  # write step
-  sys.stdout.write(str(rrdMetric[0][2]))
-  sys.stdout.write("\n")
-
-  if not pointInTime:
-    valueCount = 0
-    lastValue = None
-
-    for tuple in rrdMetric[2]:
-
-      thisValue = tuple[0]
-
-      if valueCount > 0 and thisValue == lastValue:
-        valueCount += 1
-      else:
-        if valueCount > 1:
-          sys.stdout.write("[~r]")
-          sys.stdout.write(str(valueCount))
-          sys.stdout.write("\n")
-
-        if thisValue is None:
-          sys.stdout.write("[~n]\n")
-        else:
-          sys.stdout.write(str(thisValue))
-          sys.stdout.write("\n")
-
-        valueCount = 1
-        lastValue = thisValue
-  else:
-    value = None
-    idx = -1
-    tuple = rrdMetric[2]
-    tupleLastIdx = len(tuple) * -1
-
-    while value is None and idx >= tupleLastIdx:
-      value = tuple[idx][0]
-      idx -= 1
-
-    if value is not None:
-      sys.stdout.write(str(value))
-      sys.stdout.write("\n")
-
-  sys.stdout.write("[~EOM]\n")
-  return
-
-
-def stripList(l):
-  return ([x.strip() for x in l])
-
-
-sys.stdout.write("Content-type: text/plain\n\n")
-
-# write start time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-requestMethod = os.environ['REQUEST_METHOD']
-
-if requestMethod == 'POST':
-  postData = sys.stdin.readline()
-  queryString = cgi.parse_qs(postData)
-  queryString = dict((k, v[0]) for k, v in queryString.items())
-elif requestMethod == 'GET':
-  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']));
-
-if "m" in queryString:
-  metricParts = queryString["m"].split(",")
-else:
-  metricParts = [""]
-metricParts = stripList(metricParts)
-
-hostParts = []
-if "h" in queryString:
-  hostParts = queryString["h"].split(",")
-hostParts = stripList(hostParts)
-
-if "c" in queryString:
-  clusterParts = queryString["c"].split(",")
-else:
-  clusterParts = [""]
-clusterParts = stripList(clusterParts)
-
-if "p" in queryString:
-  rrdPath = queryString["p"]
-else:
-  rrdPath = "/var/lib/ganglia/rrds/"
-
-start = None
-if "s" in queryString:
-  start = queryString["s"]
-
-end = None
-if "e" in queryString:
-  end = queryString["e"]
-
-resolution = None
-if "r" in queryString:
-  resolution = queryString["r"]
-
-if "cf" in queryString:
-  cf = queryString["cf"]
-else:
-  cf = "AVERAGE"
-
-if "pt" in queryString:
-  pointInTime = True
-else:
-  pointInTime = False
-
-
-def _walk(*args, **kwargs):
-  for root, dirs, files in os.walk(*args, **kwargs):
-    for dir in dirs:
-      qualified_dir = os.path.join(root, dir)
-      if os.path.islink(qualified_dir):
-        for x in os.walk(qualified_dir, **kwargs):
-          yield x
-    yield (root, dirs, files)
-
-
-for cluster in clusterParts:
-  for path, dirs, files in _walk(rrdPath + cluster):
-    pathParts = path.split("/")
-    # Process only paths that contain files. If no host parameter is passed, process all host folders and the summary info.
-    # If a host parameter is passed, process only that host's folder.
-    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
-      for metric in metricParts:
-        file = metric + ".rrd"
-        fileFullPath = os.path.join(path, file)
-        if os.path.exists(fileFullPath):
-          #Exact name of metric
-          printMetric(pathParts[-2], pathParts[-1], file[:-4],
-                      os.path.join(path, file), cf, start, end, resolution,
-                      pointInTime)
-        else:
-          #Regex as metric name
-          metricRegex = metric + '\.rrd$'
-          p = re.compile(metricRegex)
-          matchedFiles = filter(p.match, files)
-          for matchedFile in matchedFiles:
-            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
-                        os.path.join(path, matchedFile), cf, start, end,
-                        resolution, pointInTime)
-
-sys.stdout.write("[~EOF]\n")
-# write end time
-sys.stdout.write(str(time.mktime(time.gmtime())))
-sys.stdout.write("\n")
-
-sys.stdout.flush()

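rrd.py reads its parameters from the query string (or the POST body): m for metric names, h for hosts, c for clusters, p for the RRD root, s/e/r for start, end and resolution, cf for the consolidation function and pt for a point-in-time lookup. A hypothetical GET request against a collector that has the script under /cgi-bin could look like:

    curl 'http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=cpu_user,cpu_system&s=-3600&cf=AVERAGE'

The reply is the plain-text framing produced above: ds_name, cluster, host and metric name, then start time, step and the values, with [~EOM] closing each metric and [~EOF] closing the whole response.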
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrdcachedLib.sh
deleted file mode 100644
index 8b7c257..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/rrdcachedLib.sh
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants etc.
-source ./gangliaLib.sh;
-
-RRDCACHED_BIN=/usr/bin/rrdcached;
-RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
-RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
-RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
-
-function getRrdcachedLoggedPid()
-{
-    if [ -e "${RRDCACHED_PID_FILE}" ]
-    then
-        echo `cat ${RRDCACHED_PID_FILE}`;
-    fi
-}
-
-function getRrdcachedRunningPid()
-{
-    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
-
-    if [ -n "${rrdcachedLoggedPid}" ]
-    then
-        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
-    fi
-}


[35/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
new file mode 100644
index 0000000..e79472b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startRrdcached.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    # changed because of a problem puppet had with the nobody user
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b ${RRDCACHED_BASE_DIR} -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
+    # this, but it sometimes doesn't take effect due to a lack of permissions,
+    # so perform the operation explicitly to be super-sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    for i in `seq 0 5`; do
+      rrdcachedRunningPid=`getRrdcachedRunningPid`;
+      if [ -n "${rrdcachedRunningPid}" ]
+        then
+          break;
+      fi
+      sleep 1;
+    done
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
new file mode 100644
index 0000000..2764e0e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmetad.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
new file mode 100644
index 0000000..1af3eb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopGmond.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -KILL ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
new file mode 100644
index 0000000..0a0d8d8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/stopRrdcached.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
new file mode 100644
index 0000000..b27f7a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/teardownGanglia.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
new file mode 100644
index 0000000..1eae6d0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia.py
@@ -0,0 +1,106 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import os
+
+
+def groups_and_users():
+  import params
+
+  Group(params.user_group)
+  Group(params.gmetad_user)
+  Group(params.gmond_user)
+  User(params.gmond_user,
+       groups=[params.gmond_user])
+  User(params.gmetad_user,
+       groups=[params.gmetad_user])
+
+
+def config():
+  import params
+
+  shell_cmds_dir = params.ganglia_shell_cmds_dir
+  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
+                 'gmondLib.sh', 'rrdcachedLib.sh',
+                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
+                 'startRrdcached.sh', 'stopGmetad.sh',
+                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
+  Directory(shell_cmds_dir,
+            owner="root",
+            group="root",
+            recursive=True
+  )
+  init_file("gmetad")
+  init_file("gmond")
+  for sh_file in shell_files:
+    shell_file(sh_file)
+  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
+    ganglia_TemplateConfig(conf_file)
+
+
+def init_file(name):
+  import params
+
+  File("/etc/init.d/hdp-" + name,
+       content=StaticFile(name + ".init"),
+       mode=0755
+  )
+
+
+def shell_file(name):
+  import params
+
+  File(params.ganglia_shell_cmds_dir + os.sep + name,
+       content=StaticFile(name),
+       mode=0755
+  )
+
+
+def ganglia_TemplateConfig(name, mode=755, tag=None):
+  import params
+
+  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
+                 owner="root",
+                 group="root",
+                 template_tag=tag,
+                 mode=mode
+  )
+
+
+def generate_daemon(ganglia_service,
+                    name=None,
+                    role=None,
+                    owner=None,
+                    group=None):
+  import params
+
+  cmd = ""
+  if ganglia_service == "gmond":
+    if role == "server":
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
+    else:
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
+  elif ganglia_service == "gmetad":
+    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
+  else:
+    raise Fail("Unexpected ganglia service")
+  Execute(format(cmd),
+          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
+                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
+  )

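generate_daemon only assembles a setupGanglia.sh command line; judging from the branches above, -c names the gmond cluster, -m marks the gmond master ("server" role), -t requests gmetad setup, and -o/-g pass the owner and group. With hypothetical parameter values (ganglia_shell_cmds_dir=/usr/libexec/hdp/ganglia, group hadoop) the three branches expand to roughly:

    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -m -o root -g hadoop   # gmond, server role
    /usr/libexec/hdp/ganglia/setupGanglia.sh -c HDPNameNode -o root -g hadoop      # gmond, monitor role
    /usr/libexec/hdp/ganglia/setupGanglia.sh -t -o root -g hadoop                  # gmetad

which matches how ganglia_monitor.py below invokes it with role="monitor" for each collocated Hadoop component.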
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
new file mode 100644
index 0000000..bddecf6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor.py
@@ -0,0 +1,163 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_monitor_service
+
+
+class GangliaMonitor(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    ganglia_monitor_service.monitor("start")
+
+  def stop(self, env):
+    ganglia_monitor_service.monitor("stop")
+
+
+  def status(self, env):
+    import status_params
+    pid_file_name = 'gmond.pid'
+    pid_file_count = 0
+    pid_dir = status_params.pid_dir
+    # Recursively check all existing gmond pid files
+    for cur_dir, subdirs, files in os.walk(pid_dir):
+      for file_name in files:
+        if file_name == pid_file_name:
+          pid_file = os.path.join(cur_dir, file_name)
+          check_process_status(pid_file)
+          pid_file_count += 1
+    if pid_file_count == 0: # If no pid file is present
+      raise ComponentIsNotRunning()
+
+
+  def config(self, env):
+    import params
+
+    ganglia.groups_and_users()
+
+    Directory(params.ganglia_conf_dir,
+              owner="root",
+              group=params.user_group,
+              recursive=True
+    )
+
+    ganglia.config()
+
+    if params.is_namenode_master:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jtnode_master:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_rmnode_master:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_master:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_slave:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+
+    Directory(path.join(params.ganglia_dir, "conf.d"),
+              owner="root",
+              group=params.user_group
+    )
+
+    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "gmond.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+if __name__ == "__main__":
+  GangliaMonitor().execute()
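
The status() check above walks the whole runtime directory because a monitor
host can carry one gmond (and one pid file) per monitored cluster. A standalone
sketch of the same walk, with a hypothetical runtime directory:

    import os

    def find_pid_files(pid_dir, pid_file_name="gmond.pid"):
        """Collect every gmond.pid found anywhere under pid_dir."""
        found = []
        for cur_dir, _subdirs, files in os.walk(pid_dir):
            if pid_file_name in files:
                found.append(os.path.join(cur_dir, pid_file_name))
        return found

    # Hypothetical path; the real value is ganglia_runtime_dir from global config.
    print(find_pid_files("/var/run/ganglia/hdp"))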

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
new file mode 100644
index 0000000..d86d894
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_monitor_service.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def monitor(action=None):# 'start' or 'stop'
+  if action == "start":
+    Execute("chkconfig gmond off",
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    )
+  Execute(
+    format(
+      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
+    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
new file mode 100644
index 0000000..e391562
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server.py
@@ -0,0 +1,181 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_server_service
+
+
+class GangliaServer(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/gmetad.pid")
+    # Check the single gmetad pid file
+    check_process_status(pid_file)
+
+  def config(self, env):
+    import params
+
+    ganglia.groups_and_users()
+    ganglia.config()
+
+    if params.has_namenodes:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_jobtracker:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_masters:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_resourcemanager:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+    if params.has_historyserver:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_slaves:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+    generate_daemon("gmetad",
+                    name = "gmetad",
+                    role = "server",
+                    owner = "root",
+                    group = params.user_group)
+
+    change_permission()
+    server_files()
+    File(path.join(params.ganglia_dir, "gmetad.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+def change_permission():
+  import params
+
+  Directory('/var/lib/ganglia/dwoo',
+            mode=0777,
+            owner=params.gmetad_user,
+            recursive=True
+  )
+
+
+def server_files():
+  import params
+
+  rrd_py_path = params.rrd_py_path
+  Directory(rrd_py_path,
+            recursive=True
+  )
+  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
+  File(rrd_py_file_path,
+       content=StaticFile("rrd.py"),
+       mode=0755
+  )
+  rrd_file_owner = params.gmetad_user
+  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
+    Directory(params.rrdcached_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              mode=0755,
+              recursive=True
+    )
+    Directory(params.rrdcached_default_base_dir,
+              action = "delete"
+    )
+    Link(params.rrdcached_default_base_dir,
+         to=params.rrdcached_base_dir
+    )
+  elif rrd_file_owner != 'nobody':
+    Directory(params.rrdcached_default_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              recursive=True
+    )
+
+
+if __name__ == "__main__":
+  GangliaServer().execute()
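
server_files() above relocates the RRD store when a non-default
rrdcached_base_dir is configured: create the custom directory, drop the packaged
default, and leave a symlink in its place. A plain-os sketch of that chain, with
hypothetical paths:

    import os
    import shutil

    def relocate_rrd_dir(default_dir, custom_dir):
        """Sketch of the Directory / Directory(action="delete") / Link chain above."""
        if default_dir == custom_dir:
            return
        if not os.path.isdir(custom_dir):
            os.makedirs(custom_dir, 0o755)            # Directory(..., recursive=True)
        if os.path.isdir(default_dir) and not os.path.islink(default_dir):
            shutil.rmtree(default_dir)                # Directory(..., action="delete")
        if not os.path.islink(default_dir):
            os.symlink(custom_dir, default_dir)       # Link(default, to=custom)

    relocate_rrd_dir("/var/lib/ganglia/rrds", "/grid/0/rrds")  # hypothetical custom dir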

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
new file mode 100644
index 0000000..b93e3f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/ganglia_server_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def server(action=None):# 'start' or 'stop'
+  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  Execute(format(command),
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
new file mode 100644
index 0000000..3700d0a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/params.py
@@ -0,0 +1,74 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+
+config = Script.get_config()
+
+user_group = config['configurations']['global']["user_group"]
+ganglia_conf_dir = config['configurations']['global']["ganglia_conf_dir"]
+ganglia_dir = "/etc/ganglia"
+ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
+
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+
+webserver_group = "apache"
+rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
+rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
+
+ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
+
+hostname = config["hostname"]
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+rm_host = default("/clusterHostInfo/rm_host", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+# datanodes are marked as slave_hosts
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
+hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
+flume_hosts = default("/clusterHostInfo/flume_hosts", [])
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+is_tasktracker = hostname in tt_hosts
+is_hbase_rs = hostname in hbase_rs_hosts
+is_flume = hostname in flume_hosts
+
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_historyserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_tasktracker = not len(tt_hosts) == 0
+has_hbase_rs = not len(hbase_rs_hosts) == 0
+has_flume = not len(flume_hosts) == 0
+
+if System.get_instance().platform == "suse":
+  rrd_py_path = '/srv/www/cgi-bin'
+else:
+  rrd_py_path = '/var/www/cgi-bin'
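
The is_*/has_* flags above are plain membership and emptiness checks against
clusterHostInfo; a small sketch with a made-up two-host layout:

    # Made-up clusterHostInfo fragment, only to illustrate the flag derivation.
    cluster_host_info = {
        "namenode_host": ["c6401.ambari.apache.org"],
        "slave_hosts": ["c6402.ambari.apache.org", "c6403.ambari.apache.org"],
    }
    hostname = "c6402.ambari.apache.org"

    namenode_host = cluster_host_info.get("namenode_host", [])
    slave_hosts = cluster_host_info.get("slave_hosts", [])

    is_namenode_master = hostname in namenode_host   # False on this host
    is_slave = hostname in slave_hosts               # True -> monitor gets an HDPDataNode gmond
    has_namenodes = len(namenode_host) > 0           # True -> server gets an HDPNameNode collector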

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
new file mode 100644
index 0000000..3ccad2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
new file mode 100644
index 0000000..23588a5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaClusters.conf.j2
@@ -0,0 +1,34 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+
+    HDPJournalNode          {{ganglia_server_host}}   8654
+    HDPFlumeServer          {{ganglia_server_host}}   8655
+    HDPHBaseRegionServer    {{ganglia_server_host}}   8656
+    HDPNodeManager          {{ganglia_server_host}}   8657
+    HDPTaskTracker          {{ganglia_server_host}}   8658
+    HDPDataNode             {{ganglia_server_host}}   8659
+    HDPSlaves               {{ganglia_server_host}}   8660
+    HDPNameNode             {{ganglia_server_host}}   8661
+    HDPJobTracker           {{ganglia_server_host}}   8662
+    HDPHBaseMaster          {{ganglia_server_host}}   8663
+    HDPResourceManager      {{ganglia_server_host}}   8664
+    HDPHistoryServer        {{ganglia_server_host}}   8666

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
new file mode 100644
index 0000000..1ead550
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaEnv.sh.j2
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER={{gmetad_user}};
+GMOND_USER={{gmond_user}};
+WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
new file mode 100644
index 0000000..4b5bdd1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/templates/gangliaLib.sh.j2
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR={{ganglia_conf_dir}};
+GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
+RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}
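
For reference, a rough Python equivalent of getGangliaClusterInfo() above,
reading the gangliaClusters.conf table shown earlier; the file location here is
assumed.

    def ganglia_cluster_info(cluster_name, conf_file="gangliaClusters.conf"):
        """Return (cluster, gmond master host, port) for one configured cluster."""
        with open(conf_file) as f:
            for line in f:
                fields = line.split()
                if fields and not fields[0].startswith("#") and fields[0] == cluster_name:
                    return fields[0], fields[1], int(fields[2])
        return None

    print(ganglia_cluster_info("HDPNameNode"))  # e.g. ('HDPNameNode', 'ganglia.example.com', 8661)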

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..39fe6e5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q $data /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..80b49e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def get_unique_id_and_date():
+    code, out = checked_call("hostid")
+    id = out.strip()
+    
+    now = datetime.datetime.now()
+    date = now.strftime("%m%d%y")
+
+    return "id{id}_date{date}".format(id=id, date=date)
+  
+def get_kinit_path(pathes_list):
+  """
+  @param pathes_list: list of directories to search for the kinit executable
+  """
+  kinit_path = ""
+  
+  for x in pathes_list:
+    if not x:
+      continue
+    
+    path = os.path.join(x,"kinit")
+
+    if os.path.isfile(path):
+      kinit_path = path
+      break
+    
+  return kinit_path
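
Assuming this module is importable as "functions", a few illustrative results
for the helpers above (the kinit result depends on what is installed on the
host):

    import functions

    print(functions.calc_xmn_from_xms("1024m", 0.2, 512))
    # "200m": floor(1024 * 0.2) = 204, rounded down to a multiple of 8, under the 512 cap
    print(functions.calc_xmn_from_xms("4096m", 0.2, 512))
    # "512m": 816 exceeds xmn_max, so the cap wins
    print(functions.get_kinit_path(["", "/no/such/dir", "/usr/bin"]))
    # "/usr/bin/kinit" if that binary exists, otherwise ""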

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..bd33463
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+def hbase(type=None # 'master' or 'regionserver' or 'client'
+              ):
+  import params
+  
+  Directory( params.conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+  
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "hdfs-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+  
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+      configurations = params.config['configurations']['hbase-policy'],
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  
+  hbase_TemplateConfig( 'hbase-env.sh')     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
+  
+  if type != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory ( [params.tmp_dir, params.log_dir],
+      owner = params.hbase_user,
+      recursive = True
+    )    
+
+def hbase_TemplateConfig(name, 
+                         tag=None
+                         ):
+  import params
+
+  TemplateConfig( format("{conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
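
hbase_TemplateConfig() passes a template_tag for the metrics file, and
resource_management's TemplateConfig appears to resolve a tagged name to
"<file>-<tag>.j2", which is why the templates later in this patch are named
hadoop-metrics.properties-GANGLIA-MASTER.j2 and -GANGLIA-RS.j2. A small sketch
of that mapping:

    # Illustrative only: which Jinja2 source the metrics TemplateConfig ends up using.
    def metrics_template_for(component_type):
        tag = 'GANGLIA-MASTER' if component_type == 'master' else 'GANGLIA-RS'
        return "hadoop-metrics.properties-%s.j2" % tag

    print(metrics_template_for('master'))        # hadoop-metrics.properties-GANGLIA-MASTER.j2
    print(metrics_template_for('regionserver'))  # hadoop-metrics.properties-GANGLIA-RS.j2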

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..0f2a1bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+
+         
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    
+    hbase(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/'
+  stdoutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
+  
+  HbaseClient().execute()
+  
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..d94b4b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='master')
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
+    check_process_status(pid_file)
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
+  stroutputf = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
+  
+  HbaseMaster().execute()
+  
+if __name__ == "__main__":
+  HbaseMaster().execute()
+  #main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..2d91e75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='regionserver')
+      
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
+    check_process_status(pid_file)
+    
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseRegionServer().execute()
+  
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..7a1248b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {conf_dir}")
+    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
+    
+    daemon_cmd = None
+    no_op_test = None
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
+
+    if daemon_cmd is not None:
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
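
A sketch of what the strings above resolve to for the master role, using
conf_dir and daemon_script from params.py in this package and a hypothetical pid
directory:

    conf_dir = "/etc/hbase/conf"
    daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
    pid_dir = "/var/run/hbase"        # hypothetical hbase_pid_dir
    role = "master"

    cmd = "%s --config %s" % (daemon_script, conf_dir)
    pid_file = "%s/hbase-hbase-%s.pid" % (pid_dir, role)
    daemon_cmd = "%s start %s" % (cmd, role)
    no_op_test = "ls %s >/dev/null 2>&1 && ps `cat %s` >/dev/null 2>&1" % (pid_file, pid_file)
    # Execute(daemon_cmd, not_if=no_op_test, ...) is skipped while the pid file
    # exists and the recorded process is still alive.
    print(daemon_cmd)
    print(no_op_test)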

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..95880cb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/params.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hbase/conf"
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+
+hbase_user = config['configurations']['global']['hbase_user']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+user_group = config['configurations']['global']['user_group']
+
+# this is "hadoop-metrics2-hbase.properties" for 2.x stacks
+metric_prop_file_name = "hadoop-metrics.properties" 
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['global']['hbase_log_dir']
+master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_xmn_size = functions.calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
+master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
+regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
+
+smoke_test_user = config['configurations']['global']['smokeuser']
+smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+service_check_data = get_unique_id_and_date()
+
+if security_enabled:
+  
+  _use_hostname_in_principal = default('instance_name', True)
+  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
+  _hostname = config['hostname']
+  _kerberos_domain = config['configurations']['global']['kerberos_domain']
+  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
+  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
+  
+  if _use_hostname_in_principal:
+    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
+  else:
+    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
+    
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
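
With security enabled, the two branches above only differ in whether the host
name is embedded in the principal; made-up values for illustration:

    # Made-up values, only to show the two principal shapes built above.
    _master_primary_name = "hbase"
    _master_principal_name = "hbase"
    _hostname = "c6401.ambari.apache.org"
    _kerberos_domain = "EXAMPLE.COM"

    per_host_principal = "%s/%s@%s" % (_master_primary_name, _hostname, _kerberos_domain)
    shared_principal = "%s@%s" % (_master_principal_name, _kerberos_domain)
    print(per_host_principal)  # hbase/c6401.ambari.apache.org@EXAMPLE.COM
    print(shared_principal)    # hbase@EXAMPLE.COM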

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..ff6d0ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
+    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
+  
+    File( '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:    
+      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
+      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
+  
+      File( hbase_grant_premissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+      
+      Execute( grantprivelegecmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+def main():
+  import sys
+  command_type = 'perform'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseServiceCheck().execute()
+  
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
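
Without security, the kinit prefix above is empty, so the two Execute calls
reduce to roughly the following; the id/date token is whatever
get_unique_id_and_date() produced for the run (example value shown):

    conf_dir = "/etc/hbase/conf"
    service_check_data = "ida8c0f301_date042514"   # example get_unique_id_and_date() output

    servicecheckcmd = "hbase --config %s shell /tmp/hbase-smoke.sh" % conf_dir
    smokeverifycmd = "/tmp/hbaseSmokeVerify.sh %s %s" % (conf_dir, service_check_data)
    # Step 1 writes a row to 'ambarismoketest'; step 2 scans it and greps for
    # service_check_data and "1 row(s)" (see hbaseSmokeVerify.sh earlier in this patch).
    print(servicecheckcmd)
    print(smokeverifycmd)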

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
new file mode 100644
index 0000000..c9b20ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..1c75d15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is an hardcoded-name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
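
The {{ganglia_server_host}} placeholders above are Jinja2 variables; when the agent materializes this template on a host they are replaced with values computed in the stack's params. A minimal stand-alone illustration of that substitution, using the jinja2 library directly and a made-up host name rather than Ambari's own Template machinery:

    from jinja2 import Template  # pip install jinja2

    # Hypothetical value standing in for what params.py would normally provide.
    line = Template("hbase.servers={{ganglia_server_host}}:8663\n")
    print(line.render(ganglia_server_host="ganglia.example.com"))
    # -> hbase.servers=ganglia.example.com:8663

Note that this master-side template points the emitters at port 8663, while the region-server variant below uses 8656.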

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..e971e13
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties-GANGLIA-RS.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8656
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8656

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
new file mode 100644
index 0000000..1c75d15
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hadoop-metrics.properties.j2
@@ -0,0 +1,50 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
new file mode 100644
index 0000000..b8505b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-env.sh.j2
@@ -0,0 +1,82 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of ZooKeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% endif %}
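
The {% if security_enabled %} block at the end of this template means the JAAS login options are appended to the HBase daemons' JVM flags only on secure (Kerberos) clusters; on non-secure clusters the rendered hbase-env.sh simply ends after HBASE_MANAGES_ZK. A small sketch of that conditional rendering with plain jinja2 and made-up paths (not Ambari's own Template resource):

    from jinja2 import Template  # pip install jinja2

    tpl = Template(
        "export HBASE_MANAGES_ZK=false\n"
        "{% if security_enabled %}"
        "export HBASE_OPTS=\"$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}\"\n"
        "{% endif %}")

    # Secure cluster: the JAAS line is emitted.
    print(tpl.render(security_enabled=True,
                     client_jaas_config_file="/etc/hbase/conf/hbase_client_jaas.conf"))

    # Non-secure cluster: only the first line remains.
    print(tpl.render(security_enabled=False))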

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..61fe62f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file
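
The rendered hbase-smoke.sh above is a sequence of HBase shell commands (disable/drop/create, a put of the per-run {{service_check_data}} value, then a scan), so the service check ultimately feeds it to the hbase shell as the smoke user. The HBASE service_check.py itself is not part of this hunk; the following is only a rough sketch, in the same resource_management idiom as the other scripts here, of how that wiring typically looks (resource names and paths are assumptions):

    from resource_management import *

    def hbase_smoke_sketch():
      import params  # assumed to provide conf_dir, smokeuser, service_check_data

      smoke_script = '/tmp/hbase-smoke.sh'
      hbase_conf_dir = params.conf_dir  # assumed parameter name

      # Render the template next to the other smoke artifacts in /tmp.
      File(smoke_script,
           content=Template('hbase-smoke.sh.j2'),
           mode=0755)

      # Run the shell commands as the smoke user; logoutput surfaces the scan output.
      Execute(format("hbase --config {hbase_conf_dir} shell {smoke_script}"),
              user=params.smokeuser,
              tries=3,
              try_sleep=5,
              logoutput=True)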

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..3b3bb18
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,23 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};


[22/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_service_check.py
deleted file mode 100644
index 5112e99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_service_check.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def hcat_service_check():
-    import params
-
-    unique = get_unique_id_and_date()
-    output_file = format("/apps/hive/warehouse/hcatsmoke{unique}")
-    test_cmd = format("fs -test -e {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format(
-        "{kinit_path_local} -kt {smoke_user_keytab} {smokeuser}; ")
-    else:
-      kinit_cmd = ""
-
-    File('/tmp/hcatSmoke.sh',
-         content=StaticFile("hcatSmoke.sh"),
-         mode=0755
-    )
-
-    prepare_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare")
-
-    Execute(prepare_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True)
-
-    ExecuteHadoop(test_cmd,
-                  user=params.hdfs_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir)
-
-    cleanup_cmd = format("{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} cleanup")
-
-    Execute(cleanup_cmd,
-            tries=3,
-            user=params.smokeuser,
-            try_sleep=5,
-            path=['/usr/sbin', '/usr/local/bin', '/bin', '/usr/bin'],
-            logoutput=True
-    )
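
The prepare/test/cleanup flow above leans on the format() helper from resource_management, which expands {name} placeholders from variables visible at the call site, so prepare_cmd becomes the optional kinit prefix followed by "sh /tmp/hcatSmoke.sh hcatsmoke<id> prepare". A plain-Python approximation of that expansion, with str.format and made-up values standing in for the helper's caller-scope lookup:

    # Made-up values; get_unique_id_and_date() normally derives the id at run time.
    kinit_cmd = ""                     # empty when security is disabled
    unique = "id20140118_00_00"

    prepare_cmd = "{kinit_cmd}sh /tmp/hcatSmoke.sh hcatsmoke{unique} prepare".format(
        kinit_cmd=kinit_cmd, unique=unique)
    print(prepare_cmd)
    # -> sh /tmp/hcatSmoke.sh hcatsmokeid20140118_00_00 prepare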

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive.py
deleted file mode 100644
index b37ebb2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hive(name=None):
-  import params
-
-  if name == 'metastore' or name == 'hiveserver2':
-    hive_config_dir = params.hive_server_conf_dir
-    config_file_mode = 0600
-    jdbc_connector()
-  else:
-    hive_config_dir = params.hive_conf_dir
-    config_file_mode = 0644
-
-  Directory(hive_config_dir,
-            owner=params.hive_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  XmlConfig("hive-site.xml",
-            conf_dir=hive_config_dir,
-            configurations=params.config['configurations']['hive-site'],
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=config_file_mode
-  )
-
-  cmd = format("/bin/sh -c 'cd /usr/lib/ambari-agent/ && curl -kf --retry 5 "
-               "{jdk_location}{check_db_connection_jar_name} -o {check_db_connection_jar_name}'")
-
-  Execute(cmd,
-          not_if=format("[ -f {check_db_connection_jar_name} ]"))
-
-  if name == 'metastore':
-    File(params.start_metastore_path,
-         mode=0755,
-         content=StaticFile('startMetastore.sh')
-    )
-
-  elif name == 'hiveserver2':
-    File(params.start_hiveserver2_path,
-         mode=0755,
-         content=StaticFile('startHiveserver2.sh')
-    )
-
-  if name != "client":
-    crt_directory(params.hive_pid_dir)
-    crt_directory(params.hive_log_dir)
-    crt_directory(params.hive_var_lib)
-
-  File(format("{hive_config_dir}/hive-env.sh"),
-       owner=params.hive_user,
-       group=params.user_group,
-       content=Template('hive-env.sh.j2', conf_dir=hive_config_dir)
-  )
-
-  crt_file(format("{hive_conf_dir}/hive-default.xml.template"))
-  crt_file(format("{hive_conf_dir}/hive-env.sh.template"))
-  crt_file(format("{hive_conf_dir}/hive-exec-log4j.properties.template"))
-  crt_file(format("{hive_conf_dir}/hive-log4j.properties.template"))
-
-
-def crt_directory(name):
-  import params
-
-  Directory(name,
-            recursive=True,
-            owner=params.hive_user,
-            group=params.user_group,
-            mode=0755)
-
-
-def crt_file(name):
-  import params
-
-  File(name,
-       owner=params.hive_user,
-       group=params.user_group
-  )
-
-
-def jdbc_connector():
-  import params
-
-  if params.hive_jdbc_driver == "com.mysql.jdbc.Driver":
-    cmd = format("hive mkdir -p {artifact_dir} ; cp /usr/share/java/{jdbc_jar_name} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            creates=params.target,
-            path=["/bin", "/usr/bin/"])
-
-  elif params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-    cmd = format(
-      "mkdir -p {artifact_dir} ; curl -kf --retry 10 {driver_curl_source} -o {driver_curl_target} &&  "
-      "cp {driver_curl_target} {target}")
-
-    Execute(cmd,
-            not_if=format("test -f {target}"),
-            path=["/bin", "/usr/bin/"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_client.py
deleted file mode 100644
index 0a5fb2b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_client.py
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-import sys
-from resource_management import *
-
-from hive import hive
-
-class HiveClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='client')
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  HiveClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_metastore.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_metastore.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_metastore.py
deleted file mode 100644
index c741174..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_metastore.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveMetastore(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='metastore')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'metastore',
-                   action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'metastore',
-                   action = 'stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    # Check that the process recorded in the Metastore pid file is still alive
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveMetastore().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_server.py
deleted file mode 100644
index 3ad81a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_server.py
+++ /dev/null
@@ -1,63 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from hive import hive
-from hive_service import hive_service
-
-class HiveServer(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    hive(name='hiveserver2')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    hive_service( 'hiveserver2',
-                  action = 'start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    hive_service( 'hiveserver2',
-                  action = 'stop'
-    )
-
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    # Check that the process recorded in the HiveServer2 pid file is still alive
-    check_process_status(pid_file)
-
-if __name__ == "__main__":
-  HiveServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_service.py
deleted file mode 100644
index e8d4e5c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hive_service.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def hive_service(
-    name,
-    action='start'):
-
-  import params
-
-  if name == 'metastore':
-    pid_file = format("{hive_pid_dir}/{hive_metastore_pid}")
-    cmd = format(
-      "env HADOOP_HOME={hadoop_home} JAVA_HOME={java64_home} {start_metastore_path} {hive_log_dir}/hive.out {hive_log_dir}/hive.log {pid_file} {hive_server_conf_dir}")
-  elif name == 'hiveserver2':
-    pid_file = format("{hive_pid_dir}/{hive_pid}")
-    cmd = format(
-      "env JAVA_HOME={java64_home} {start_hiveserver2_path} {hive_log_dir}/hive-server2.out {hive_log_dir}/hive-server2.log {pid_file} {hive_server_conf_dir}")
-
-  if action == 'start':
-    demon_cmd = format("{cmd}")
-    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(demon_cmd,
-            user=params.hive_user,
-            not_if=no_op_test
-    )
-
-    if params.hive_jdbc_driver == "com.mysql.jdbc.Driver" or params.hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format(
-        "{java64_home}/bin/java -cp {check_db_connection_jar}:/usr/share/java/{jdbc_jar_name} org.apache.ambari.server.DBConnectionVerification {hive_jdbc_connection_url} {hive_metastore_user_name} {hive_metastore_user_passwd} {hive_jdbc_driver}")
-      Execute(db_connection_check_command,
-              path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin')
-
-  elif action == 'stop':
-    demon_cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
-    Execute(demon_cmd)
-
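
Note how the start path above is made idempotent: no_op_test checks that the pid file exists and that the recorded process is still alive, and Execute's not_if skips the daemon launch whenever that test succeeds. The same pattern suits any "start only if not already running" step; a minimal sketch in the same idiom (assumed to run inside an Ambari Script, with an illustrative command and pid path supplied by the caller):

    from resource_management import *

    def start_if_not_running(daemon_cmd, pid_file, run_user):
      # Skip the launch when the pid file exists and its process is still alive.
      alive_test = format("ls {pid_file} >/dev/null 2>&1 && "
                          "ps `cat {pid_file}` >/dev/null 2>&1")
      Execute(daemon_cmd,
              user=run_user,
              not_if=alive_test)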

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_server.py
deleted file mode 100644
index a45d310..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_server.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-
-from mysql_service import mysql_service
-
-class MysqlServer(Script):
-
-  if System.get_instance().platform == "suse":
-    daemon_name = 'mysql'
-  else:
-    daemon_name = 'mysqld'
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action='start')
-
-    File(params.mysql_adduser_path,
-         mode=0755,
-         content=StaticFile('addMysqlUser.sh')
-    )
-
-    # Autoescaping
-    cmd = ("bash", "-x", params.mysql_adduser_path, self.daemon_name,
-           params.hive_metastore_user_name, str(params.hive_metastore_user_passwd) , params.mysql_host[0])
-
-    Execute(cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-    mysql_service(daemon_name=self.daemon_name, action='stop')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    mysql_service(daemon_name=self.daemon_name, action = 'stop')
-
-  def status(self, env):
-    mysql_service(daemon_name=self.daemon_name, action = 'status')
-
-if __name__ == "__main__":
-  MysqlServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_service.py
deleted file mode 100644
index cfb3e08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/mysql_service.py
+++ /dev/null
@@ -1,38 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def mysql_service(daemon_name=None, action='start'):
-  cmd = format('service {daemon_name} {action}')
-
-  if action == 'status':
-    logoutput = False
-  else:
-    logoutput = True
-
-  Execute(cmd,
-          path="/usr/local/bin/:/bin/:/sbin/",
-          tries=1,
-          logoutput=logoutput)
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/params.py
deleted file mode 100644
index 0cf89be..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/params.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
-hive_server_conf_dir = "/etc/hive/conf.server"
-hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
-
-hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
-
-#users
-hive_user = config['configurations']['global']['hive_user']
-hive_lib = '/usr/lib/hive/lib/'
-#JDBC driver jar name
-hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
-if hive_jdbc_driver == "com.mysql.jdbc.Driver":
-  jdbc_jar_name = "mysql-connector-java.jar"
-elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
-  jdbc_jar_name = "ojdbc6.jar"
-
-check_db_connection_jar_name = "DBConnectionVerification.jar"
-check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
-
-#common
-hive_metastore_port = config['configurations']['global']['hive_metastore_port']
-hive_var_lib = '/var/lib/hive'
-hive_server_host = config['clusterHostInfo']['hive_server_host']
-hive_url = format("jdbc:hive2://{hive_server_host}:10000")
-
-smokeuser = config['configurations']['global']['smokeuser']
-smoke_test_sql = "/tmp/hiveserver2.sql"
-smoke_test_path = "/tmp/hiveserver2Smoke.sh"
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-
-security_enabled = config['configurations']['global']['security_enabled']
-
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
-
-#hive_env
-hive_conf_dir = "/etc/hive/conf"
-hive_dbroot = config['configurations']['global']['hive_dbroot']
-hive_log_dir = config['configurations']['global']['hive_log_dir']
-hive_pid_dir = status_params.hive_pid_dir
-hive_pid = status_params.hive_pid
-
-#hive-site
-hive_database_name = config['configurations']['global']['hive_database_name']
-
-#Starting hiveserver2
-start_hiveserver2_script = 'startHiveserver2.sh'
-
-hadoop_home = '/usr'
-
-##Starting metastore
-start_metastore_script = 'startMetastore.sh'
-hive_metastore_pid = status_params.hive_metastore_pid
-java_share_dir = '/usr/share/java'
-driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
-
-hdfs_user =  config['configurations']['global']['hdfs_user']
-user_group = config['configurations']['global']['user_group']
-artifact_dir = "/tmp/HDP-artifacts/"
-
-target = format("{hive_lib}/{jdbc_jar_name}")
-
-jdk_location = config['hostLevelParams']['jdk_location']
-driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
-
-start_hiveserver2_path = "/tmp/start_hiveserver2_script"
-start_metastore_path = "/tmp/start_metastore_script"
-
-hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-java64_home = config['hostLevelParams']['java_home']
-
-##### MYSQL
-
-db_name = config['configurations']['global']['hive_database_name']
-mysql_user = "mysql"
-mysql_group = 'mysql'
-mysql_host = config['clusterHostInfo']['hive_mysql_host']
-
-mysql_adduser_path = "/tmp/addMysqlUser.sh"
-
-########## HCAT
-
-hcat_conf_dir = '/etc/hcatalog/conf'
-
-metastore_port = 9933
-hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
-
-hcat_dbroot = hcat_lib
-
-hcat_user = config['configurations']['global']['hcat_user']
-webhcat_user = config['configurations']['global']['webhcat_user']
-
-hcat_pid_dir = status_params.hcat_pid_dir
-hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
-
-hadoop_conf_dir = '/etc/hadoop/conf'

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/service_check.py
deleted file mode 100644
index 111e8a1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/service_check.py
+++ /dev/null
@@ -1,56 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-from hcat_service_check import hcat_service_check
-
-class HiveServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
-      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
-      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
-    else:
-      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
-
-    File(params.smoke_test_path,
-         content=StaticFile('hiveserver2Smoke.sh'),
-         mode=0755
-    )
-
-    File(params.smoke_test_sql,
-         content=StaticFile('hiveserver2.sql')
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True,
-            user=params.smokeuser)
-
-    hcat_service_check()
-
-if __name__ == "__main__":
-  HiveServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/status_params.py
deleted file mode 100644
index 7770975..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/status_params.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hive_pid_dir = config['configurations']['global']['hive_pid_dir']
-hive_pid = 'hive-server.pid'
-
-hive_metastore_pid = 'hive.pid'
-
-hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hcat-env.sh.j2
deleted file mode 100644
index 2a35240..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hcat-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-JAVA_HOME={{java64_home}}
-HCAT_PID_DIR={{hcat_pid_dir}}/
-HCAT_LOG_DIR={{hcat_log_dir}}/
-HCAT_CONF_DIR={{hcat_conf_dir}}
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-#DBROOT is the path where the connector jars are downloaded
-DBROOT={{hcat_dbroot}}
-USER={{hcat_user}}
-METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hive-env.sh.j2
deleted file mode 100644
index 548262a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/templates/hive-env.sh.j2
+++ /dev/null
@@ -1,55 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements. See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership. The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Set Hive and Hadoop environment variables here. These variables can be used
-# to control the execution of Hive. It should be used by admins to configure
-# the Hive installation (so that users do not have to set environment variables
-# or set command line parameters to get correct behavior).
-#
-# The hive service being invoked (CLI/HWI etc.) is available via the environment
-# variable SERVICE
-
-# Hive Client memory usage can be an issue if a large number of clients
-# are running at the same time. The flags below have been useful in
-# reducing memory usage:
-#
- if [ "$SERVICE" = "cli" ]; then
-   if [ -z "$DEBUG" ]; then
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
-   else
-     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
-   fi
- fi
-
-# The heap size of the JVM started by the hive shell script can be controlled via:
-
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-
-# Larger heap size may be required when running queries over large number of files or partitions.
-# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
-# appropriate for hive server (hwi etc).
-
-
-# Set HADOOP_HOME to point to a specific hadoop install directory
-HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
-
-# Hive Configuration Directory can be controlled by:
-export HIVE_CONF_DIR={{conf_dir}}
-
-# Folder containing extra libraries required for hive compilation/execution can be controlled by:
-# export HIVE_AUX_JARS_PATH=
-export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/metainfo.xml
deleted file mode 100644
index 9af461e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/metainfo.xml
+++ /dev/null
@@ -1,105 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>NAGIOS</name>
-      <comment>Nagios Monitoring and Alerting system</comment>
-      <version>3.5.0</version>
-      <components>
-        <component>
-            <name>NAGIOS_SERVER</name>
-            <category>MASTER</category>
-            <commandScript>
-              <script>scripts/nagios_server.py</script>
-              <scriptType>PYTHON</scriptType>
-              <timeout>600</timeout>
-            </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>perl</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>perl-Net-SNMP</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-plugins-1.4.9</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-www-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>nagios-devel-3.5.0-99</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>fping</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>hdp_mon_nagios_addons</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <package>
-            <type>rpm</type>
-            <name>php5-json</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <package>
-            <type>rpm</type>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>redhat5</osType>
-          <package>
-            <type>rpm</type>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-        <osSpecific>
-          <osType>oraclelinux5</osType>
-          <package>
-            <type>rpm</type>
-            <name>php-pecl-json.x86_64</name>
-          </package>
-        </osSpecific>
-      </osSpecifics>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_aggregate.php
deleted file mode 100644
index f4063fb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_aggregate.php
+++ /dev/null
@@ -1,243 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-  $options = getopt ("f:s:n:w:c:t:");
-  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-  $status_file=$options['f'];
-  $status_code=$options['s'];
-  $type=$options['t'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  if ($type == "service" && !array_key_exists('n', $options)) {
-    echo "Service description not provided -n option\n";
-    exit(3);
-  }
-  if ($type == "service") {
-    $service_name=$options['n'];
-    /* echo "DESC: " . $service_name . "\n"; */
-  }
-
-  $result = array();
-  $status_file_content = file_get_contents($status_file);
-
-  $counts;
-  if ($type == "service") {
-    $counts=query_alert_count($status_file_content, $service_name, $status_code);
-  } else {
-    $counts=query_host_count($status_file_content, $status_code);
-  }
-
-  if ($counts['total'] == 0) {
-    $percent = 0;
-  } else {
-    $percent = ($counts['actual']/$counts['total'])*100;
-  }
-  if ($percent >= $crit) {
-    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-    exit (1);
-  }
-  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
-  exit(0);
-
-
-  # Functions
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
-  }
-
-  /* Query host count */
-  function query_host_count ($status_file_content, $status_code) {
-    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $hostcounts_object = array ();
-    $total_hosts = 0;
-    $hosts = 0;
-    foreach ($matches[0] as $object) {
-      $total_hosts++;
-      if (getParameter($object, "current_state") == $status_code) {
-        $hosts++;
-      }
-    }
-    $hostcounts_object['total'] = $total_hosts;
-    $hostcounts_object['actual'] = $hosts;
-    return $hostcounts_object;
-  }
-
-  /* Query Alert counts */
-  function query_alert_count ($status_file_content, $service_name, $status_code) {
-    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
-    $alertcounts_objects = array ();
-    $total_alerts=0;
-    $alerts=0;
-    foreach ($matches[0] as $object) {
-      if (getParameter($object, "service_description") == $service_name) {
-        $total_alerts++;
-        if (getParameter($object, "current_state") >= $status_code) {
-          $alerts++;
-        }
-      }
-    }
-    $alertcounts_objects['total'] = $total_alerts;
-    $alertcounts_objects['actual'] = $alerts;
-    return $alertcounts_objects;
-  }
-
-  function get_service_type($service_description)
-  {
-    $pieces = explode("::", $service_description);
-    switch ($pieces[0]) {
-      case "NAMENODE":
-        $pieces[0] = "HDFS";
-        break;
-      case "JOBTRACKER":
-        $pieces[0] = "MAPREDUCE";
-        break;
-      case "HBASEMASTER":
-        $pieces[0] = "HBASE";
-        break;
-      case "SYSTEM":
-      case "HDFS":
-      case "MAPREDUCE":
-      case "HBASE":
-        break;
-      default:
-        $pieces[0] = "UNKNOWN";
-    }
-    return $pieces[0];
-  }
-
-  function getParameter($object, $key)
-  {
-    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
-    $num_mat = preg_match($pattern, $object, $matches);
-    $value = "";
-    if ($num_mat) {
-      $value = $matches[1];
-    }
-    return $value;
-  }
-
-function indent($json) {
-
-    $result      = '';
-    $pos         = 0;
-    $strLen      = strlen($json);
-    $indentStr   = '  ';
-    $newLine     = "\n";
-    $prevChar    = '';
-    $outOfQuotes = true;
-
-    for ($i=0; $i<=$strLen; $i++) {
-
-        // Grab the next character in the string.
-        $char = substr($json, $i, 1);
-
-        // Are we inside a quoted string?
-        if ($char == '"' && $prevChar != '\\') {
-            $outOfQuotes = !$outOfQuotes;
-
-        // If this character is the end of an element,
-        // output a new line and indent the next line.
-        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
-            $result .= $newLine;
-            $pos --;
-            for ($j=0; $j<$pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        // Add the character to the result string.
-        $result .= $char;
-
-        // If the last character was the beginning of an element,
-        // output a new line and indent the next line.
-        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
-            $result .= $newLine;
-            if ($char == '{' || $char == '[') {
-                $pos ++;
-            }
-
-            for ($j = 0; $j < $pos; $j++) {
-                $result .= $indentStr;
-            }
-        }
-
-        $prevChar = $char;
-    }
-
-    return $result;
-}
-
-/* JSON document format */
-/*
-{
-  "programstatus":{
-    "last_command_check":"1327385743"
-  },
-  "hostcounts":{
-    "up_nodes":"",
-    "down_nodes":""
-  },
-  "hoststatus":[
-    {
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_state":"0",
-      "last_hard_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_check":"1327385564",
-      "current_attempt":"1",
-      "last_hard_state_change":"1327362079",
-      "last_time_up":"1327385574",
-      "last_time_down":"0",
-      "last_time_unreachable":"0",
-      "is_flapping":"0",
-      "last_check":"1327385574",
-      "servicestatus":[
-      ]
-    }
-  ],
-  "servicestatus":[
-    {
-      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
-      "service_description":"HDFS Current Load",
-      "host_name"="ip-10-242-191-48.ec2.internal",
-      "current_attempt":"1",
-      "current_state":"0",
-      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
-      "last_hard_state_change":"1327362079",
-      "last_time_ok":"1327385479",
-      "last_time_warning":"0",
-      "last_time_unknown":"0",
-      "last_time_critical":"0",
-      "last_check":"1327385574",
-      "is_flapping":"0"
-    }
-  ]
-}
-*/
-
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_cpu.pl
deleted file mode 100644
index a5680f7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_cpu.pl
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/perl -w 
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-use strict;
-use Net::SNMP;
-use Getopt::Long;
-
-# Variable
-my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
-my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
-my $o_host = 	undef;
-my $o_community = undef;
-my $o_warn=	undef;
-my $o_crit=	undef;
-my $o_timeout = 15;
-my $o_port = 161;
-
-sub Usage {
-    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
-}
-
-Getopt::Long::Configure ("bundling");
-GetOptions(
-  'H:s'   => \$o_host,	
-  'C:s'   => \$o_community,	
-  'c:s'   => \$o_crit,        
-  'w:s'   => \$o_warn
-          );
-if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
-  Usage();
-  exit 3;
-}
-$o_warn =~ s/\%//g; 
-$o_crit =~ s/\%//g;
-alarm ($o_timeout);
-$SIG{'ALRM'} = sub {
- print "Unable to contact host: $o_host\n";
- exit 3;
-};
-
-# Connect to host
-my ($session,$error);
-($session, $error) = Net::SNMP->session(
-		-hostname  => $o_host,
-		-community => $o_community,
-		-port      => $o_port,
-		-timeout   => $o_timeout
-	  );
-if (!defined($session)) {
-   printf("Error opening session: %s.\n", $error);
-   exit 3;
-}
-
-my $exit_val=undef;
-my $resultat =  (Net::SNMP->VERSION < 4) ?
-	  $session->get_table($base_proc)
-	: $session->get_table(Baseoid => $base_proc);
-
-if (!defined($resultat)) {
-   printf("ERROR: Description table : %s.\n", $session->error);
-   $session->close;
-   exit 3;
-}
-
-$session->close;
-
-my ($cpu_used,$ncpu)=(0,0);
-foreach my $key ( keys %$resultat) {
-  if ($key =~ /$proc_load/) {
-    $cpu_used += $$resultat{$key};
-    $ncpu++;
-  }
-}
-
-if ($ncpu==0) {
-  print "Can't find CPU usage information : UNKNOWN\n";
-  exit 3;
-}
-
-$cpu_used /= $ncpu;
-
-print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
-printf(" %.1f%%",$cpu_used);
-$exit_val=0;
-
-if ($cpu_used > $o_crit) {
- print " > $o_crit% : CRITICAL\n";
- $exit_val=2;
-} else {
-  if ($cpu_used > $o_warn) {
-   print " > $o_warn% : WARNING\n";
-   $exit_val=1;
-  }
-}
-print " < $o_warn% : OK\n" if ($exit_val eq 0);
-exit $exit_val;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_datanode_storage.php
deleted file mode 100644
index dee22b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_datanode_storage.php
+++ /dev/null
@@ -1,100 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * check the storage capacity remaining on local datanode storage
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
-  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  }  
-  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
-
-  $out_msg = "Capacity:[" . $cap_total . 
-             "], Remaining Capacity:[" . $cap_remain . 
-             "], percent_full:[" . $percent_full  . "]";
-  
-  if ($percent_full > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent_full > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_blocks.php
deleted file mode 100644
index 19347b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_blocks.php
+++ /dev/null
@@ -1,115 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * check the corrupt or missing blocks % is > threshold
- * check_jmx -H hostaddress -p port -w 1% -c 1%
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $nn_jmx_property=$options['s'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['u'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $m_percent = 0;
-    $c_percent = 0;
-    $object = $json_array['beans'][0];
-    $missing_blocks = $object['MissingBlocks'];
-    $corrupt_blocks = $object['CorruptBlocks'];
-    $total_blocks = $object['BlocksTotal'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    if($total_blocks == 0) {
-      $m_percent = 0;
-      $c_percent = 0;
-    } else {
-      $m_percent = ($missing_blocks/$total_blocks)*100;
-      $c_percent = ($corrupt_blocks/$total_blocks)*100;
-      break;
-    }
-  }
-  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
-             ">, missing_blocks:<" . $missing_blocks .
-             ">, total_blocks:<" . $total_blocks . ">";
-
-  if ($m_percent > $crit || $c_percent > $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($m_percent > $warn || $c_percent > $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_capacity.php
deleted file mode 100644
index af72723..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hdfs_capacity.php
+++ /dev/null
@@ -1,109 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * check the % HDFS capacity used >= warn and critical limits.
- * check_jmx -H hostaddress -p port -w 1 -c 1
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $hosts=$options['h'];
-  $port=$options['p'];
-  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
-  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  foreach (preg_split('/,/', $hosts) as $host) {
-    /* Get the json document */
-    $ch = curl_init();
-    $username = rtrim(`id -un`, "\n");
-    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
-                                  CURLOPT_RETURNTRANSFER => true,
-                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                  CURLOPT_USERPWD => "$username:",
-                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
-    $json_string = curl_exec($ch);
-    $info = curl_getinfo($ch);
-    if (intval($info['http_code']) == 401){
-      logout();
-      $json_string = curl_exec($ch);
-    }
-    $info = curl_getinfo($ch);
-    curl_close($ch);
-    $json_array = json_decode($json_string, true);
-    $percent = 0;
-    $object = $json_array['beans'][0];
-    $CapacityUsed = $object['CapacityUsed'];
-    $CapacityRemaining = $object['CapacityRemaining'];
-    if (count($object) == 0) {
-      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-      exit(2);
-    }    
-    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
-    if($CapacityTotal == 0) {
-      $percent = 0;
-    } else {
-      $percent = ($CapacityUsed/$CapacityTotal)*100;
-      break;
-    }
-  }
-  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
-             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
-
-  if ($percent >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($percent >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh
deleted file mode 100644
index 640c077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hive_metastore_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#The uri is of the form thrift://<hostname>:<port>
-HOST=$1
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
-export JAVA_HOME=$JAVA_HOME
-out=`hcat $HCAT_URL -e "show databases" 2>&1`
-if [[ "$?" -ne 0 ]]; then
-  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
-  exit 2;
-fi
-echo "OK: Hive Metastore status OK";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hue_status.sh
deleted file mode 100644
index 076d9b3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_hue_status.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-status=`/etc/init.d/hue status 2>&1`
-
-if [[ "$?" -ne 0 ]]; then
-	echo "WARNING: Hue is stopped";
-	exit 1;
-fi
-
-echo "OK: Hue is running";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
deleted file mode 100644
index 15c85eb..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/bin/bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-MAPRED_LOCAL_DIRS=$1
-CRITICAL=`echo $2 | cut -d % -f 1`
-IFS=","
-for mapred_dir in $MAPRED_LOCAL_DIRS
-do
-  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
-  if [ $percent -ge $CRITICAL ]; then
-    echo "CRITICAL: MapReduce local dir is full."
-    exit 2
-  fi
-done
-echo "OK: MapReduce local dir space is available."
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_name_dir_status.php
deleted file mode 100644
index 186166d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_name_dir_status.php
+++ /dev/null
@@ -1,93 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to namenode, get the jmx-json document
- * check the NameDirStatuses to find any offline (failed) directories
- * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
- */
- 
-  include "hdp_nagios_init.php";
-
-  $options = getopt("h:p:e:k:r:t:s:");
-  //Check only for mandatory options
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-  
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if ($object['NameDirStatuses'] == "") {
-    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
-    exit(1);
-  }
-  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
-  $failed_dir_count = count($NameDirStatuses['failed']);
-  $out_msg = "CRITICAL: Offline NameNode directories: ";
-  if ($failed_dir_count > 0) {
-    foreach ($NameDirStatuses['failed'] as $key => $value) {
-      $out_msg = $out_msg . $key . ":" . $value . ", ";
-    }
-    echo $out_msg . "\n";
-    exit (2);
-  }
-  echo "OK: All NameNode directories are active" . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
-  }
-?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_namenodes_ha.sh
deleted file mode 100644
index 50b075a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_namenodes_ha.sh
+++ /dev/null
@@ -1,82 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-IFS=',' read -a namenodes <<< "$1"
-port=$2
-totalNN=${#namenodes[@]}
-activeNN=()
-standbyNN=()
-unavailableNN=()
-
-for nn in "${namenodes[@]}"
-do
-  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
-  if [ "$status" == "active" ]; then
-    activeNN[${#activeNN[*]}]="$nn"
-  elif [ "$status" == "standby" ]; then
-    standbyNN[${#standbyNN[*]}]="$nn"
-  elif [ "$status" == "" ]; then
-    unavailableNN[${#unavailableNN[*]}]="$nn"
-  fi
-done
-
-message=""
-critical=false
-
-if [ ${#activeNN[@]} -gt 1 ]; then
-  critical=true
-  message=$message" Only one NN can have HAState=active;"
-elif [ ${#activeNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Active NN available;"
-elif [ ${#standbyNN[@]} == 0 ]; then
-  critical=true
-  message=$message" No Standby NN available;"
-fi
-
-NNstats=" Active<"
-for nn in "${activeNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Standby<"
-for nn in "${standbyNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">, Unavailable<"
-for nn in "${unavailableNN[@]}"
-do
-  NNstats="$NNstats$nn;"
-done
-NNstats=${NNstats%\;}
-NNstats=$NNstats">"
-
-if [ $critical == false ]; then
-  echo "OK: NameNode HA healthy;"$NNstats
-  exit 0
-fi
-
-echo "CRITICAL:"$message$NNstats
-exit 2

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_nodemanager_health.sh
deleted file mode 100644
index 020b41d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_nodemanager_health.sh
+++ /dev/null
@@ -1,44 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HOST=$1
-PORT=$2
-NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
-SEC_ENABLED=$3
-export PATH="/usr/bin:$PATH"
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$4
-  NAGIOS_USER=$5
-  KINIT_PATH=$6
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-
-RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
-if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
-  echo "OK: NodeManager healthy";
-  exit 0;
-fi
-echo "CRITICAL: NodeManager unhealthy";
-exit 2;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_oozie_status.sh
deleted file mode 100644
index 820ee99..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_oozie_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# OOZIE_URL is of the form http://<hostname>:<port>/oozie
-HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
-PORT=$2
-JAVA_HOME=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-OOZIE_URL="http://$HOST:$PORT/oozie"
-export JAVA_HOME=$JAVA_HOME
-out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
-if [[ "$?" -ne 0 ]]; then 
-  echo "CRITICAL: Error accessing Oozie Server status [$out]";
-  exit 2;
-fi
-echo "OK: Oozie Server status [$out]";
-exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_rpcq_latency.php
deleted file mode 100644
index 463f69b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_rpcq_latency.php
+++ /dev/null
@@ -1,104 +0,0 @@
-<?php
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-/* This plugin makes call to master node, get the jmx-json document
- * It checks the rpc wait time in the queue, RpcQueueTime_avg_time
- * check_rpcq_latency -h hostaddress -p port -t ServiceName -w 1 -c 1
- * Warning and Critical values are in seconds
- * Service Name = JobTracker, NameNode, JobHistoryServer
- */
-
-  include "hdp_nagios_init.php";
-
-  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
-  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
-      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
-    usage();
-    exit(3);
-  }
-
-  $host=$options['h'];
-  $port=$options['p'];
-  $master=$options['n'];
-  $warn=$options['w'];
-  $crit=$options['c'];
-  $keytab_path=$options['k'];
-  $principal_name=$options['r'];
-  $kinit_path_local=$options['t'];
-  $security_enabled=$options['s'];
-  $ssl_enabled=$options['e'];
-
-  /* Kinit if security enabled */
-  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
-  $retcode = $status[0];
-  $output = $status[1];
-  
-  if ($output != 0) {
-    echo "CRITICAL: Error doing kinit for nagios. $output";
-    exit (2);
-  }
-
-  $protocol = ($ssl_enabled == "true" ? "https" : "http");
-
-
-  /* Get the json document */
-  $ch = curl_init();
-  $username = rtrim(`id -un`, "\n");
-  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
-                                CURLOPT_RETURNTRANSFER => true,
-                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
-                                CURLOPT_USERPWD => "$username:",
-                                CURLOPT_SSL_VERIFYPEER => FALSE ));
-  $json_string = curl_exec($ch);
-  $info = curl_getinfo($ch);
-  if (intval($info['http_code']) == 401){
-    logout();
-    $json_string = curl_exec($ch);
-  }
-  $info = curl_getinfo($ch);
-  curl_close($ch);
-  $json_array = json_decode($json_string, true);
-  $object = $json_array['beans'][0];
-  if (count($object) == 0) {
-    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
-    exit(2);
-  } 
-  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
-  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
-
-  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
-             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
-             "> Secs";
-
-  if ($RpcQueueTime_avg_time >= $crit) {
-    echo "CRITICAL: " . $out_msg . "\n";
-    exit (2);
-  }
-  if ($RpcQueueTime_avg_time >= $warn) {
-    echo "WARNING: " . $out_msg . "\n";
-    exit (1);
-  }
-  echo "OK: " . $out_msg . "\n";
-  exit(0);
-
-  /* print usage */
-  function usage () {
-    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
-  }
-?>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_templeton_status.sh
deleted file mode 100644
index 7fbc4c4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/files/check_templeton_status.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-#!/usr/bin/env bash
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-# out='{"status":"ok","version":"v1"}<status_code:200>'
-HOST=$1
-PORT=$2
-VERSION=$3
-SEC_ENABLED=$4
-if [[ "$SEC_ENABLED" == "true" ]]; then 
-  NAGIOS_KEYTAB=$5
-  NAGIOS_USER=$6
-  KINIT_PATH=$7
-  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
-  if [[ "$?" -ne 0 ]]; then
-    echo "CRITICAL: Error doing kinit for nagios [$out1]";
-    exit 2;
-  fi
-fi
-regex="^.*\"status\":\"ok\".*<status_code:200>$"
-out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
-if [[ $out =~ $regex ]]; then
-  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
-  echo "OK: WebHCat Server status [$out]";
-  exit 0;
-fi
-echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
-exit 2;


[03/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json
new file mode 100644
index 0000000..68efe9f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/YARN/metrics.json
@@ -0,0 +1,2534 @@
+{
+  "NODEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsFailed": {
+            "metric": "mapred.ShuffleOutputsFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.metrics.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersCompleted": {
+            "metric": "yarn.ContainersCompleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersKilled": {
+            "metric": "yarn.ContainersKilled",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedGB": {
+            "metric": "yarn.AllocatedGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsOK": {
+            "metric": "mapred.ShuffleOutputsOK",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersFailed": {
+            "metric": "yarn.ContainersFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedContainers": {
+            "metric": "yarn.AllocatedContainers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersRunning": {
+            "metric": "yarn.ContainersRunning",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersLaunched": {
+            "metric": "yarn.ContainersLaunched",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AvailableGB": {
+            "metric": "yarn.AvailableGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleConnections": {
+            "metric": "mapred.ShuffleConnections",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersIniting": {
+            "metric": "yarn.ContainersIniting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputBytes": {
+            "metric": "mapred.ShuffleOutputBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.metrics.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsFailed": {
+            "metric": "mapred.ShuffleOutputsFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.metrics.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersCompleted": {
+            "metric": "yarn.ContainersCompleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersKilled": {
+            "metric": "yarn.ContainersKilled",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedGB": {
+            "metric": "yarn.AllocatedGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputsOK": {
+            "metric": "mapred.ShuffleOutputsOK",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersFailed": {
+            "metric": "yarn.ContainersFailed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AllocatedContainers": {
+            "metric": "yarn.AllocatedContainers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersRunning": {
+            "metric": "yarn.ContainersRunning",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersLaunched": {
+            "metric": "yarn.ContainersLaunched",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/AvailableGB": {
+            "metric": "yarn.AvailableGB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleConnections": {
+            "metric": "mapred.ShuffleConnections",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/ContainersIniting": {
+            "metric": "yarn.ContainersIniting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/mapred/ShuffleOutputBytes": {
+            "metric": "mapred.ShuffleOutputBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.metrics.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ]
+  },
+  "RESOURCEMANAGER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "yarn.ClusterMetrics.NumLostNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "yarn.ClusterMetrics.NumActiveNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisCopy": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memMaxM": {
+            "metric": "jvm.JvmMetrics.MemMaxM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountCopy": {
+            "metric": "jvm.JvmMetrics.GcCountCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
+            "pointInTime": false,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryMax":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/HeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryMax":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/runtime/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemMaxM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/unhealthyNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedVCores",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/decommissionedNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/startTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveApplications": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveApplications",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/nodeManagers": {
+            "metric": "Hadoop:service=ResourceManager,name=RMNMInfo.LiveNodeManagers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.CallQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedVCores",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsPending": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsPending",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsCompleted": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsCompleted",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveUsers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveUsers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsRunning": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_1440": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_1440",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableVCores",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/activeNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersReleased": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersReleased",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/rm_metrics/cluster/lostNMcount": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedMB": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedMB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingVCores": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingVCores",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "yarn.ClusterMetrics.NumLostNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.ugi.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "yarn.ClusterMetrics.NumActiveNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
+            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.ugi.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisCopy": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memMaxM": {
+            "metric": "jvm.JvmMetrics.MemMaxM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/AllocateAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.ugi.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCountCopy": {
+            "metric": "jvm.JvmMetrics.GcCountCopy",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.ugi.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/SubmitApplicationNumOps": {
+            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
+            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
+            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
+            "pointInTime": false,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumLostNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
+            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryMax":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/HeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryMax":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/NonHeapMemoryUsed":{
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
+            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+      

<TRUNCATED>

[30/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py
new file mode 100644
index 0000000..86e962c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/params.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+
+pig_conf_dir = "/etc/pig/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hdfs_user = config['configurations']['global']['hdfs_user']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_home = "/usr"
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py
new file mode 100644
index 0000000..c2d7b02
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def pig():
+  import params
+
+  Directory( params.pig_conf_dir,
+    owner = params.hdfs_user,
+    group = params.user_group
+  )
+
+  pig_TemplateConfig( ['pig-env.sh','pig.properties','log4j.properties'])
+  
+  
+def pig_TemplateConfig(name):
+  import params
+  
+  if not isinstance(name, list):
+    name = [name]
+    
+  for x in name:
+    TemplateConfig( format("{pig_conf_dir}/{x}"),
+        owner = params.hdfs_user
+    )
+  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py
new file mode 100644
index 0000000..acd0cb1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/pig_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from pig import pig
+
+         
+class PigClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    pig()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py
new file mode 100644
index 0000000..3cca087
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/scripts/service_check.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class PigServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    input_file = 'passwd'
+    output_file = "pigsmoke.out"
+  
+    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
+    #cleanup put below to handle retries; if retrying, there will be a stale file that needs cleanup; exit code is a function of the second command
+    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+    test_cmd = format("fs -test -e {output_file}")
+  
+    ExecuteHadoop( create_file_cmd,
+      tries     = 3,
+      try_sleep = 5,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+  
+    File( '/tmp/pigSmoke.sh',
+      content = StaticFile("pigSmoke.sh"),
+      mode = 0755
+    )
+  
+    Execute( "pig /tmp/pigSmoke.sh",
+      tries     = 3,
+      try_sleep = 5,
+      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      user      = params.smokeuser,
+      logoutput = True
+    )
+  
+    ExecuteHadoop( test_cmd,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigServiceCheck().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigServiceCheck().execute()
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/log4j.properties.j2
new file mode 100644
index 0000000..9ef6e2c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/log4j.properties.j2
@@ -0,0 +1,30 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# ***** Set the org.apache.pig logger level to INFO and its only appender to A.
+log4j.logger.org.apache.pig=info, A
+
+# ***** A is set to be a ConsoleAppender.
+log4j.appender.A=org.apache.log4j.ConsoleAppender
+# ***** A uses PatternLayout.
+log4j.appender.A.layout=org.apache.log4j.PatternLayout
+log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2
new file mode 100644
index 0000000..b0e17d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig-env.sh.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2
new file mode 100644
index 0000000..6fcb233
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/PIG/package/templates/pig.properties.j2
@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Pig configuration file. All values can be overwritten by command line arguments.
+
+# log4jconf log4j configuration file
+# log4jconf=./conf/log4j.properties
+
+# a file that contains pig script
+#file=
+
+# load jarfile, colon separated
+#jar=
+
+#verbose: print all log messages to screen (by default only INFO and above are printed to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=local
+
+#pig.logfile=
+
+#Do not spill temp files smaller than this size (bytes)
+#pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+#pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+#pig.exec.reducers.bytes.per.reducer=1000000000
+#pig.exec.reducers.max=999
+
+#Use this option only when your Pig job will otherwise die because of
+#using more counters than the hadoop configured limit
+#pig.disable.counter=true
+hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py
new file mode 100644
index 0000000..3860581
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/__init__.py
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py
new file mode 100644
index 0000000..8f7eb21
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/params.py
@@ -0,0 +1,36 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+security_enabled = config['configurations']['global']['security_enabled']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+sqoop_conf_dir = "/usr/lib/sqoop/conf"
+hbase_home = "/usr"
+hive_home = "/usr"
+zoo_conf_dir = "/etc/zookeeper"
+sqoop_lib = "/usr/lib/sqoop/lib"
+sqoop_user = "sqoop"
+
+keytab_path = config['configurations']['global']['keytab_path']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py
new file mode 100644
index 0000000..b872be6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/service_check.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+
+from resource_management import *
+
+
+class SqoopServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
+    Execute("sqoop version",
+            user = params.smokeuser,
+            logoutput = True
+    )
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py
new file mode 100644
index 0000000..492550e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop.py
@@ -0,0 +1,51 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import sys
+
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  )
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group
+  )
+  sqoop_TemplateConfig("sqoop-env.sh")
+  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
+          owner = params.sqoop_user,
+          group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+  pass
+
+def sqoop_TemplateConfig(name, tag=None):
+  import params
+  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
+                  owner = params.sqoop_user,
+                  template_tag = tag
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py
new file mode 100644
index 0000000..bd2863c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/scripts/sqoop_client.py
@@ -0,0 +1,40 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+from resource_management import *
+
+from sqoop import sqoop
+
+
+class SqoopClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2
new file mode 100644
index 0000000..90cbc75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/SQOOP/package/templates/sqoop-env.sh.j2
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
+
+#Set the path for where the zookeeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh
new file mode 100644
index 0000000..cefc4f0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/files/templetonSmoke.sh
@@ -0,0 +1,95 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export ttonhost=$1
+export smoke_test_user=$2
+export smoke_user_keytab=$3
+export security_enabled=$4
+export kinit_path_local=$5
+export ttonurl="http://${ttonhost}:50111/templeton/v1"
+
+if [[ $security_enabled == "true" ]]; then
+  kinitcmd="${kinit_path_local}  -kt ${smoke_user_keytab} ${smoke_test_user}; "
+else
+  kinitcmd=""
+fi
+
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>'    $ttonurl/status 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (status cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0
+
+#try hcat ddl command
+echo "user.name=${smoke_test_user}&exec=show databases;" /tmp/show_db.post.txt
+cmd="${kinitcmd}curl --negotiate -u : -s -w 'http_code <%{http_code}>' -d  \@${destdir}/show_db.post.txt  $ttonurl/ddl 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (ddl cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit  1
+fi
+
+# NOT SURE?? SUHAS
+if [[ $security_enabled == "true" ]]; then
+  echo "Templeton Pig Smoke Tests not run in secure mode"
+  exit 0
+fi
+
+#try pig query
+outname=${smoke_test_user}.`date +"%M%d%y"`.$$;
+ttonTestOutput="/tmp/idtest.${outname}.out";
+ttonTestInput="/tmp/idtest.${outname}.in";
+ttonTestScript="idtest.${outname}.pig"
+
+echo "A = load '$ttonTestInput' using PigStorage(':');"  > /tmp/$ttonTestScript
+echo "B = foreach A generate \$0 as id; " >> /tmp/$ttonTestScript
+echo "store B into '$ttonTestOutput';" >> /tmp/$ttonTestScript
+
+#copy pig script to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /tmp/$ttonTestScript /tmp/$ttonTestScript"
+
+#copy input file to hdfs
+su - ${smoke_test_user} -c "hadoop dfs -copyFromLocal /etc/passwd $ttonTestInput"
+
+#create, copy post args file
+echo -n "user.name=${smoke_test_user}&file=/tmp/$ttonTestScript" > /tmp/pig_post.txt
+
+#submit pig query
+cmd="curl -s -w 'http_code <%{http_code}>' -d  \@${destdir}/pig_post.txt  $ttonurl/pig 2>&1"
+retVal=`su - ${smoke_test_user} -c "$cmd"`
+httpExitCode=`echo $retVal |sed 's/.*http_code <\([0-9]*\)>.*/\1/'`
+if [[ "$httpExitCode" -ne "200" ]] ; then
+  echo "Templeton Smoke Test (pig cmd): Failed. : $retVal"
+  export TEMPLETON_EXIT_CODE=1
+  exit 1
+fi
+
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py
new file mode 100644
index 0000000..60b52a7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/params.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+webhcat_user = config['configurations']['global']['webhcat_user']
+download_url = config['configurations']['global']['apache_artifacts_download_url']
+
+config_dir = '/etc/hcatalog/conf'
+
+templeton_log_dir = config['configurations']['global']['hcat_log_dir']
+templeton_pid_dir = status_params.templeton_pid_dir
+
+pid_file = status_params.pid_file
+
+hadoop_conf_dir = config['configurations']['webhcat-site']['templeton.hadoop.conf.dir']
+templeton_jar = config['configurations']['webhcat-site']['templeton.jar']
+
+hadoop_home = '/usr'
+user_group = config['configurations']['global']['user_group']
+
+webhcat_server_host = config['clusterHostInfo']['webhcat_server_host']
+
+webhcat_apps_dir = "/apps/webhcat"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py
new file mode 100644
index 0000000..58b4d25
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/service_check.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+class WebHCatServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File('/tmp/templetonSmoke.sh',
+         content= StaticFile('templetonSmoke.sh'),
+         mode=0755
+    )
+
+    cmd = format("sh /tmp/templetonSmoke.sh {webhcat_server_host[0]} {smokeuser} {smokeuser_keytab}"
+                 " {security_enabled} {kinit_path_local}",
+                 smokeuser_keytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True)
+
+if __name__ == "__main__":
+  WebHCatServiceCheck().execute()
\ No newline at end of file

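For illustration only: the smoke command above is assembled with resource_management's format(), pulling most placeholders from the params module and overriding the keytab argument inline. A plain-Python sketch (str.format, not the Ambari code path) of the two shapes the command can take; the host, user, and keytab values below are made-up examples, not values from this patch:

# Sketch with str.format; all concrete values are illustrative assumptions.
template = ("sh /tmp/templetonSmoke.sh {host} {user} {keytab} "
            "{security_enabled} {kinit}")

# Secure cluster: the real keytab path is passed through.
print(template.format(host="webhcat1.example.com", user="ambari-qa",
                      keytab="/etc/security/keytabs/smokeuser.headless.keytab",
                      security_enabled=True, kinit="/usr/bin/kinit"))

# Non-secure cluster: the placeholder string "no_keytab" is passed instead.
print(template.format(host="webhcat1.example.com", user="ambari-qa",
                      keytab="no_keytab", security_enabled=False,
                      kinit="/usr/bin/kinit"))
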
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py
new file mode 100644
index 0000000..21dde6f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+templeton_pid_dir = config['configurations']['global']['hcat_pid_dir']
+pid_file = format('{templeton_pid_dir}/webhcat.pid')

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py
new file mode 100644
index 0000000..ae12f54
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat.py
@@ -0,0 +1,120 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import sys
+
+
+def webhcat():
+  import params
+
+  Directory(params.templeton_pid_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.templeton_log_dir,
+            owner=params.webhcat_user,
+            mode=0755,
+            group=params.user_group,
+            recursive=True)
+
+  Directory(params.config_dir,
+            owner=params.webhcat_user,
+            group=params.user_group)
+
+  XmlConfig("webhcat-site.xml",
+            conf_dir=params.config_dir,
+            configurations=params.config['configurations']['webhcat-site'],
+            owner=params.webhcat_user,
+            group=params.user_group,
+  )
+
+  File(format("{config_dir}/webhcat-env.sh"),
+       owner=params.webhcat_user,
+       group=params.user_group,
+       content=Template('webhcat-env.sh.j2')
+  )
+
+  if params.security_enabled:
+    kinit_if_needed = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+  else:
+    kinit_if_needed = ""
+
+  if kinit_if_needed:
+    Execute(kinit_if_needed,
+            user=params.webhcat_user,
+            path='/bin'
+    )
+
+  copyFromLocal(path='/usr/lib/hadoop/contrib/streaming/hadoop-streaming*.jar',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hadoop-streaming.jar"),
+                kinit_if_needed=kinit_if_needed
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/pig.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/pig.tar.gz"),
+  )
+
+  copyFromLocal(path='/usr/share/HDP-webhcat/hive.tar.gz',
+                owner=params.webhcat_user,
+                mode=0755,
+                dest_dir=format("{webhcat_apps_dir}/hive.tar.gz")
+  )
+
+
+def copyFromLocal(path=None, owner=None, group=None, mode=None, dest_dir=None, kinit_if_needed=""):
+  import params
+
+  copy_cmd = format("fs -copyFromLocal {path} {dest_dir}")
+  # Only copy when the destination does not already exist in HDFS.
+  unless_cmd = format("{kinit_if_needed} hadoop fs -ls {dest_dir} >/dev/null 2>&1")
+
+  ExecuteHadoop(copy_cmd,
+                not_if=unless_cmd,
+                user=owner,
+                conf_dir=params.hadoop_conf_dir)
+
+  if not owner:
+    chown = None
+  else:
+    if not group:
+      chown = owner
+    else:
+      chown = format('{owner}:{group}')
+
+  if chown:
+    chown_cmd = format("fs -chown {chown} {dest_dir}")
+
+    ExecuteHadoop(chown_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)
+
+  if mode:
+    # mode arrives as an int (e.g. 0755); render it in octal for fs -chmod.
+    chmod_cmd = format('fs -chmod {dir_mode} {dest_dir}', dir_mode=oct(mode))
+
+    ExecuteHadoop(chmod_cmd,
+                  user=owner,
+                  conf_dir=params.hadoop_conf_dir)

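A minimal sketch of the idempotence pattern used by copyFromLocal above: the copy is guarded by a not_if command, so the upload only runs when the artifact is not already in HDFS. This sketch uses plain subprocess calls rather than ExecuteHadoop, and the example paths are assumptions for illustration:

import subprocess

def copy_if_missing(src, dest):
    # Equivalent of the not_if guard: `hadoop fs -ls <dest>` succeeds only
    # when the destination already exists.
    exists = subprocess.call("hadoop fs -ls %s >/dev/null 2>&1" % dest,
                             shell=True) == 0
    if not exists:
        subprocess.check_call("hadoop fs -copyFromLocal %s %s" % (src, dest),
                              shell=True)

# Hypothetical usage:
# copy_if_missing("/usr/share/HDP-webhcat/pig.tar.gz", "/apps/webhcat/pig.tar.gz")
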
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py
new file mode 100644
index 0000000..4365111
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_server.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from webhcat import webhcat
+from webhcat_service import webhcat_service
+
+class WebHCatServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    webhcat()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    webhcat_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    webhcat_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_file)
+
+if __name__ == "__main__":
+  WebHCatServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py
new file mode 100644
index 0000000..12c3854
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/scripts/webhcat_service.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+
+def webhcat_service(action='start'):
+  import params
+
+  cmd = format('env HADOOP_HOME={hadoop_home} /usr/lib/hcatalog/sbin/webhcat_server.sh')
+
+  if action == 'start':
+    daemon_cmd = format('{cmd} start')
+    no_op_test = format('ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1')
+    Execute(daemon_cmd,
+            user=params.webhcat_user,
+            not_if=no_op_test
+    )
+  elif action == 'stop':
+    daemon_cmd = format('{cmd} stop')
+    Execute(daemon_cmd,
+            user=params.webhcat_user
+    )
+    Execute(format('rm -f {pid_file}'))

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2
new file mode 100644
index 0000000..9ea4a79
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/WEBHCAT/package/templates/webhcat-env.sh.j2
@@ -0,0 +1,44 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The file containing the running pid
+PID_FILE={{pid_file}}
+
+TEMPLETON_LOG_DIR={{templeton_log_dir}}/
+
+
+WEBHCAT_LOG_DIR={{templeton_log_dir}}/
+
+# The console error log
+ERROR_LOG={{templeton_log_dir}}/webhcat-console-error.log
+
+# The console log
+CONSOLE_LOG={{templeton_log_dir}}/webhcat-console.log
+
+#TEMPLETON_JAR=templeton_jar_name
+
+#HADOOP_PREFIX=hadoop_prefix
+
+#HCAT_PREFIX=hive_prefix
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+export HADOOP_HOME=/usr/lib/hadoop

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh
new file mode 100644
index 0000000..07017e1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkEnv.sh
@@ -0,0 +1,96 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This script should be sourced into other zookeeper
+# scripts to setup the env variables
+
+# We use ZOOCFGDIR if defined,
+# otherwise we use /etc/zookeeper
+# or the conf directory that is
+# a sibling of this script's directory
+if [ "x$ZOOCFGDIR" = "x" ]
+then
+    if [ -d "/etc/zookeeper" ]
+    then
+        ZOOCFGDIR="/etc/zookeeper"
+    else
+        ZOOCFGDIR="$ZOOBINDIR/../conf"
+    fi
+fi
+
+if [ "x$ZOOCFG" = "x" ]
+then
+    ZOOCFG="zoo.cfg"
+fi
+
+ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
+
+if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
+then
+    . "$ZOOCFGDIR/zookeeper-env.sh"
+fi
+
+if [ "x${ZOO_LOG_DIR}" = "x" ]
+then
+    ZOO_LOG_DIR="."
+fi
+
+if [ "x${ZOO_LOG4J_PROP}" = "x" ]
+then
+    ZOO_LOG4J_PROP="INFO,CONSOLE"
+fi
+
+#add the zoocfg dir to classpath
+CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
+
+for i in "$ZOOBINDIR"/../src/java/lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../lib/*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work in the release
+for i in "$ZOOBINDIR"/../zookeeper-*.jar
+do
+    CLASSPATH="$i:$CLASSPATH"
+done
+
+#make it work for developers
+for d in "$ZOOBINDIR"/../build/lib/*.jar
+do
+   CLASSPATH="$d:$CLASSPATH"
+done
+
+#make it work for developers
+CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
+
+case "`uname`" in
+    CYGWIN*) cygwin=true ;;
+    *) cygwin=false ;;
+esac
+
+if $cygwin
+then
+    CLASSPATH=`cygpath -wp "$CLASSPATH"`
+fi
+
+#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh
new file mode 100644
index 0000000..49ceb4d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkServer.sh
@@ -0,0 +1,120 @@
+#!/bin/sh
+
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+#
+# If this script is run out of /usr/bin or some other system bin directory
+# it should be linked to and not copied. Things like java jar files are found
+# relative to the canonical path of this script.
+#
+
+# See the following page for extensive details on setting
+# up the JVM to accept JMX remote management:
+# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+# by default we allow local JMX connections
+if [ "x$JMXLOCALONLY" = "x" ]
+then
+    JMXLOCALONLY=false
+fi
+
+if [ "x$JMXDISABLE" = "x" ]
+then
+    echo "JMX enabled by default"
+    # for some reason these two options are necessary on jdk6 on Ubuntu
+    #   according to the docs they are not necessary, but otherwise jconsole
+    #   cannot do a local attach
+    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
+else
+    echo "JMX disabled by user request"
+    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
+fi
+
+# Only follow symlinks if readlink supports it
+if readlink -f "$0" > /dev/null 2>&1
+then
+  ZOOBIN=`readlink -f "$0"`
+else
+  ZOOBIN="$0"
+fi
+ZOOBINDIR=`dirname "$ZOOBIN"`
+
+. "$ZOOBINDIR"/zkEnv.sh
+
+if [ "x$2" != "x" ]
+then
+    ZOOCFG="$ZOOCFGDIR/$2"
+fi
+
+if $cygwin
+then
+    ZOOCFG=`cygpath -wp "$ZOOCFG"`
+    # cygwin has a "kill" in the shell itself, gets confused
+    KILL=/bin/kill
+else
+    KILL=kill
+fi
+
+echo "Using config: $ZOOCFG"
+
+ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
+
+
+case $1 in
+start)
+    echo  "Starting zookeeper ... "
+    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
+    /bin/echo -n $! > "$ZOOPIDFILE"
+    echo STARTED
+    ;;
+stop)
+    echo "Stopping zookeeper ... "
+    if [ ! -f "$ZOOPIDFILE" ]
+    then
+    echo "error: could not find file $ZOOPIDFILE"
+    exit 1
+    else
+    $KILL -9 $(cat "$ZOOPIDFILE")
+    rm "$ZOOPIDFILE"
+    echo STOPPED
+    fi
+    ;;
+upgrade)
+    shift
+    echo "upgrading the servers to 3.*"
+    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
+    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
+    echo "Upgrading ... "
+    ;;
+restart)
+    shift
+    "$0" stop ${@}
+    sleep 3
+    "$0" start ${@}
+    ;;
+status)
+    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
+    if [ "x$STAT" = "x" ]
+    then
+        echo "Error contacting service. It is probably not running."
+    else
+        echo $STAT
+    fi
+    ;;
+*)
+    echo "Usage: $0 {start|stop|restart|status}" >&2
+
+esac

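The status) branch above relies on ZooKeeper's four-letter-word protocol: it pipes "stat" to the client port with nc and greps for the Mode line. A rough Python equivalent of that probe, for illustration only; the host and port defaults are assumptions, whereas the real script reads clientPort from zoo.cfg:

import socket

def zk_mode(host="localhost", port=2181):
    # Send the "stat" four-letter word and return the "Mode: ..." line,
    # or None if the server did not answer.
    s = socket.create_connection((host, port), timeout=5)
    try:
        s.sendall(b"stat")
        data = b""
        while True:
            chunk = s.recv(4096)
            if not chunk:
                break
            data += chunk
    finally:
        s.close()
    for line in data.decode("utf-8", "replace").splitlines():
        if line.startswith("Mode:"):
            return line
    return None

# print(zk_mode() or "Error contacting service. It is probably not running.")
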
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh
new file mode 100644
index 0000000..32dfce4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkService.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+zkcli_script=$1
+user=$2
+conf_dir=$3
+su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh
new file mode 100644
index 0000000..c1c11b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/files/zkSmoke.sh
@@ -0,0 +1,78 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+smoke_script=$1
+smoke_user=$2
+conf_dir=$3
+client_port=$4
+security_enabled=$5
+kinit_path_local=$6
+smoke_user_keytab=$7
+export ZOOKEEPER_EXIT_CODE=0
+test_output_file=/tmp/zkSmoke.out
+errors_expr="ERROR|Exception"
+acceptable_expr="SecurityException"
+zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
+zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
+echo "zk_node1=$zk_node1"
+if [[ $security_enabled == "True" ]]; then
+  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
+  su - $smoke_user -c "$kinitcmd"
+fi
+
+function verify_output() {
+  if [ -f $test_output_file ]; then
+    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
+    if [ "$?" -eq 0 ]; then
+      echo "Error found in the zookeeper smoke test. Exiting."
+      echo $errors
+      exit 1
+    fi
+  fi
+}
+
+# Delete /zk_smoketest znode if exists
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
+# Create /zk_smoketest znode on one zookeeper server
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
+verify_output
+
+for i in $zkhosts ; do
+  echo "Running test on host $i"
+  # Verify the data associated with znode across all the nodes in the zookeeper quorum
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
+  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
+  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
+  echo $output | grep smoke_data
+  if [[ $? -ne 0 ]] ; then
+    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
+    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
+  fi
+done
+
+su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
+if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
+  echo "Zookeeper Smoke Test: Failed" 
+else
+   echo "Zookeeper Smoke Test: Passed" 
+fi
+exit $ZOOKEEPER_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py
new file mode 100644
index 0000000..a582077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/__init__.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py
new file mode 100644
index 0000000..9acc0c9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/params.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+config_dir = "/etc/zookeeper/conf"
+zk_user =  config['configurations']['global']['zk_user']
+hostname = config['hostname']
+zk_bin = '/usr/lib/zookeeper/bin'
+user_group = config['configurations']['global']['user_group']
+
+smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
+
+zk_log_dir = config['configurations']['global']['zk_log_dir']
+zk_data_dir = config['configurations']['global']['zk_data_dir']
+zk_pid_dir = status_params.zk_pid_dir
+zk_pid_file = status_params.zk_pid_file
+zk_server_heapsize = "-Xmx1024m"
+
+tickTime = config['configurations']['global']['tickTime']
+initLimit = config['configurations']['global']['initLimit']
+syncLimit = config['configurations']['global']['syncLimit']
+clientPort = config['configurations']['global']['clientPort']
+
+if 'zoo.cfg' in config['configurations']:
+  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
+else:
+  zoo_cfg_properties_map = {}
+zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
+
+zk_primary_name = "zookeeper"
+zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
+zk_principal = zk_principal_name.replace('_HOST',hostname)
+
+java64_home = config['hostLevelParams']['java_home']
+
+zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
+zookeeper_hosts.sort()
+
+keytab_path = "/etc/security/keytabs"
+zk_keytab_path = format("{keytab_path}/zk.service.keytab")
+zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
+zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
+security_enabled = config['configurations']['global']['security_enabled']
+
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+smokeuser = config['configurations']['global']['smokeuser']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py
new file mode 100644
index 0000000..6b3553d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/service_check.py
@@ -0,0 +1,47 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ZookeeperServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    File("/tmp/zkSmoke.sh",
+         mode=0755,
+         content=StaticFile('zkSmoke.sh')
+    )
+
+    cmd_quorum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
+                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
+                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
+
+    Execute(cmd_quorum,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True
+    )
+
+if __name__ == "__main__":
+  ZookeeperServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py
new file mode 100644
index 0000000..98f2903
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+zk_pid_dir = config['configurations']['global']['zk_pid_dir']
+zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py
new file mode 100644
index 0000000..c49eb22
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper.py
@@ -0,0 +1,92 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def zookeeper(type = None):
+  import params
+
+  Directory(params.config_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  configFile("zoo.cfg", template_name="zoo.cfg.j2")
+  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
+  configFile("configuration.xsl", template_name="configuration.xsl.j2")
+
+  Directory(params.zk_pid_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_log_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  Directory(params.zk_data_dir,
+            owner=params.zk_user,
+            recursive=True,
+            group=params.user_group
+  )
+
+  if type == 'server':
+    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
+
+    File(format("{zk_data_dir}/myid"),
+         mode = 0644,
+         content = myid
+    )
+
+  configFile("log4j.properties", template_name="log4j.properties.j2")
+
+  if params.security_enabled:
+    if type == "server":
+      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+    else:
+      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
+
+  File(format("{config_dir}/zoo_sample.cfg"),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+def configFile(name, template_name=None):
+  import params
+
+  File(format("{config_dir}/{name}"),
+       content=Template(template_name),
+       owner=params.zk_user,
+       group=params.user_group
+  )
+
+
+
+

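Worth noting how the myid written above stays in sync with the server.N entries in zoo.cfg.j2: params.py sorts zookeeper_hosts in place, and Jinja2's loop.index is 1-based, so sorted(hosts).index(hostname) + 1 yields the same id the template assigns to this host. A standalone sketch with made-up hostnames:

hosts = ["zk2.example.com", "zk3.example.com", "zk1.example.com"]
hosts.sort()  # params.py sorts the list before it reaches the template

# What zoo.cfg.j2 renders (loop.index starts at 1):
for idx, host in enumerate(hosts, start=1):
    print("server.%d=%s:2888:3888" % (idx, host))

# What zookeeper.py writes to {zk_data_dir}/myid on zk2.example.com:
print(hosts.index("zk2.example.com") + 1)  # -> 2
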
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py
new file mode 100644
index 0000000..028a37d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+
+class ZookeeperClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    zookeeper(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py
new file mode 100644
index 0000000..e8cc264
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_server.py
@@ -0,0 +1,55 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from zookeeper import zookeeper
+from zookeeper_service import zookeeper_service
+
+class ZookeeperServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    zookeeper(type='server')
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+    zookeeper_service(action = 'start')
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    zookeeper_service(action = 'stop')
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.zk_pid_file)
+
+if __name__ == "__main__":
+  ZookeeperServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py
new file mode 100644
index 0000000..83b8f08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/scripts/zookeeper_service.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def zookeeper_service(action='start'):
+  import params
+
+  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
+
+  if action == 'start':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
+    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            not_if=no_op_test,
+            user=params.zk_user
+    )
+  elif action == 'stop':
+    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
+    rm_pid = format("rm -f {zk_pid_file}")
+    Execute(daemon_cmd,
+            user=params.zk_user
+    )
+    Execute(rm_pid)
\ No newline at end of file

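The not_if guard above makes the start action idempotent: the daemon is launched only if no live process matches the pid file. A hedged sketch of the same check in plain Python; the default pid file path here is an example, while the real path comes from zk_pid_dir in the cluster configuration:

import os
import subprocess

def zookeeper_already_running(pid_file="/var/run/zookeeper/zookeeper_server.pid"):
    # Mirrors: ls {zk_pid_file} >/dev/null && ps `cat {zk_pid_file}` >/dev/null
    if not os.path.isfile(pid_file):
        return False
    with open(pid_file) as f:
        pid = f.read().strip()
    with open(os.devnull, "w") as devnull:
        return subprocess.call(["ps", "-p", pid],
                               stdout=devnull, stderr=devnull) == 0
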
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2
new file mode 100644
index 0000000..ca498b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/configuration.xsl.j2
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:output method="html"/>
+<xsl:template match="configuration">
+<html>
+<body>
+<table border="1">
+<tr>
+ <td>name</td>
+ <td>value</td>
+ <td>description</td>
+</tr>
+<xsl:for-each select="property">
+  <tr>
+     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
+     <td><xsl:value-of select="value"/></td>
+     <td><xsl:value-of select="description"/></td>
+  </tr>
+</xsl:for-each>
+</table>
+</body>
+</html>
+</xsl:template>
+</xsl:stylesheet>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/log4j.properties.j2
new file mode 100644
index 0000000..db69564
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/log4j.properties.j2
@@ -0,0 +1,71 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+#
+# ZooKeeper Logging Configuration
+#
+
+# Format is "<default threshold> (, <appender>)+
+
+# DEFAULT: console appender only
+log4j.rootLogger=INFO, CONSOLE
+
+# Example with rolling log file
+#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
+
+# Example with rolling log file and tracing
+#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
+
+#
+# Log INFO level and above messages to the console
+#
+log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
+log4j.appender.CONSOLE.Threshold=INFO
+log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
+log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+#
+# Add ROLLINGFILE to rootLogger to get log file output
+#    Log DEBUG level and above messages to a log file
+log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
+log4j.appender.ROLLINGFILE.Threshold=DEBUG
+log4j.appender.ROLLINGFILE.File=zookeeper.log
+
+# Max log file size of 10MB
+log4j.appender.ROLLINGFILE.MaxFileSize=10MB
+# uncomment the next line to limit number of backup files
+#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
+
+log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
+log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
+
+
+#
+# Add TRACEFILE to rootLogger to get log file output
+#    Log TRACE level and above messages to a log file
+log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
+log4j.appender.TRACEFILE.Threshold=TRACE
+log4j.appender.TRACEFILE.File=zookeeper_trace.log
+
+log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
+### Notice we are including log4j's NDC here (%x)
+log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2
new file mode 100644
index 0000000..5b68218
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zoo.cfg.j2
@@ -0,0 +1,51 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# The number of milliseconds of each tick
+tickTime={{tickTime}}
+# The number of ticks that the initial
+# synchronization phase can take
+initLimit={{initLimit}}
+# The number of ticks that can pass between
+# sending a request and getting an acknowledgement
+syncLimit={{syncLimit}}
+# the directory where the snapshot is stored.
+dataDir={{zk_data_dir}}
+# the port at which the clients will connect
+clientPort={{clientPort}}
+{% for host in zookeeper_hosts %}
+server.{{loop.index}}={{host}}:2888:3888
+{% endfor %}
+
+{% if security_enabled %}
+authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
+jaasLoginRenew=3600000
+kerberos.removeHostFromPrincipal=true
+kerberos.removeRealmFromPrincipal=true
+{% endif %}
+
+{% if zoo_cfg_properties_map_length > 0 %}
+# Custom properties
+{% endif %}
+{% for key, value in zoo_cfg_properties_map.iteritems() %}
+{{key}}={{value}}
+{% endfor %}

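For a concrete picture of what the quorum block above renders to, here is a small standalone Jinja2 sketch. Ambari renders this template through its own Template resource; this only demonstrates the loop.index numbering, and the hostnames are made up:

from jinja2 import Template

quorum_block = ("{% for host in zookeeper_hosts %}"
                "server.{{ loop.index }}={{ host }}:2888:3888\n"
                "{% endfor %}")
print(Template(quorum_block).render(
    zookeeper_hosts=["zk1.example.com", "zk2.example.com", "zk3.example.com"]))
# server.1=zk1.example.com:2888:3888
# server.2=zk2.example.com:2888:3888
# server.3=zk3.example.com:2888:3888
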
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
new file mode 100644
index 0000000..493a2a4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+export JAVA_HOME={{java64_home}}
+export ZOO_LOG_DIR={{zk_log_dir}}
+export ZOOPIDFILE={{zk_pid_file}}
+export SERVER_JVMFLAGS={{zk_server_heapsize}}
+export JAVA=$JAVA_HOME/bin/java
+export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
+
+{% if security_enabled %}
+export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
+export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
+{% endif %}


[17/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metrics.json
deleted file mode 100644
index 68efe9f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/metrics.json
+++ /dev/null
@@ -1,2534 +0,0 @@
-{
-  "NODEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsFailed": {
-            "metric": "mapred.ShuffleOutputsFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.metrics.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersCompleted": {
-            "metric": "yarn.ContainersCompleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersKilled": {
-            "metric": "yarn.ContainersKilled",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedGB": {
-            "metric": "yarn.AllocatedGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsOK": {
-            "metric": "mapred.ShuffleOutputsOK",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersFailed": {
-            "metric": "yarn.ContainersFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedContainers": {
-            "metric": "yarn.AllocatedContainers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersRunning": {
-            "metric": "yarn.ContainersRunning",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersLaunched": {
-            "metric": "yarn.ContainersLaunched",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AvailableGB": {
-            "metric": "yarn.AvailableGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleConnections": {
-            "metric": "mapred.ShuffleConnections",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersIniting": {
-            "metric": "yarn.ContainersIniting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputBytes": {
-            "metric": "mapred.ShuffleOutputBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.metrics.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsFailed": {
-            "metric": "mapred.ShuffleOutputsFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.metrics.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersCompleted": {
-            "metric": "yarn.ContainersCompleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersKilled": {
-            "metric": "yarn.ContainersKilled",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedGB": {
-            "metric": "yarn.AllocatedGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputsOK": {
-            "metric": "mapred.ShuffleOutputsOK",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersFailed": {
-            "metric": "yarn.ContainersFailed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AllocatedContainers": {
-            "metric": "yarn.AllocatedContainers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersRunning": {
-            "metric": "yarn.ContainersRunning",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersLaunched": {
-            "metric": "yarn.ContainersLaunched",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/AvailableGB": {
-            "metric": "yarn.AvailableGB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleConnections": {
-            "metric": "mapred.ShuffleConnections",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/ContainersIniting": {
-            "metric": "yarn.ContainersIniting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/mapred/ShuffleOutputBytes": {
-            "metric": "mapred.ShuffleOutputBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.metrics.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.metrics.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.CallQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=NodeManager,name=UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=NodeManager,name=RpcActivity.RpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=NodeManager,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ]
-  },
-  "RESOURCEMANAGER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "yarn.ClusterMetrics.NumLostNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "yarn.ClusterMetrics.NumActiveNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisCopy": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memMaxM": {
-            "metric": "jvm.JvmMetrics.MemMaxM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountCopy": {
-            "metric": "jvm.JvmMetrics.GcCountCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-            "pointInTime": false,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/rebootedNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryMax":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/HeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryMax":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/runtime/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemMaxM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/unhealthyNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedVCores",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/decommissionedNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/startTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveApplications": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveApplications",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/nodeManagers": {
-            "metric": "Hadoop:service=ResourceManager,name=RMNMInfo.LiveNodeManagers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.CallQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedVCores",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsPending": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsPending",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsCompleted": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsCompleted",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ActiveUsers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ActiveUsers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsRunning": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_1440": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_1440",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AvailableVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AvailableVCores",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcProcessingTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/activeNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersReleased": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersReleased",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/rm_metrics/cluster/lostNMcount": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AllocatedMB": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AllocatedMB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/PendingVCores": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).PendingVCores",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/rpcdetailed/FinishApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsCompleted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsCompleted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "yarn.ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "yarn.ClusterMetrics.NumRebootedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsSubmitted": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsSubmitted",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "yarn.ClusterMetrics.NumLostNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.ugi.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsKilled": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsKilled",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "yarn.ClusterMetrics.NumActiveNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsFailed": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsFailed",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcCountMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsRunning": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsRunning",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/ClusterMetrics/NumDecommissionedNMs": {
-            "metric": "yarn.ClusterMetrics.NumDecommissionedNMs",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.ugi.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisCopy": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingContainers",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memMaxM": {
-            "metric": "jvm.JvmMetrics.MemMaxM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/AllocateAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.AllocateAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/FinishApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.FinishApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AvailableMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AvailableMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/PendingMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).PendingMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.ugi.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AppsPending": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AppsPending",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCountCopy": {
-            "metric": "jvm.JvmMetrics.GcCountCopy",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.ugi.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/SubmitApplicationNumOps": {
-            "metric": "rpcdetailed.rpcdetailed.SubmitApplicationNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillisMarkSweepCompact": {
-            "metric": "jvm.JvmMetrics.GcTimeMillisMarkSweepCompact",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/AllocatedMB": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).AllocatedMB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetApplicationReportAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetApplicationReportAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/NodeHeartbeatAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.NodeHeartbeatAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/GetNewApplicationAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.GetNewApplicationAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/RegisterApplicationMasterAvgTime": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterApplicationMasterAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\"([.])\",\"/\")/ReservedContainers": {
-            "metric": "yarn.QueueMetrics.Queue=(.+).ReservedContainers",
-            "pointInTime": false,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsFailed": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsFailed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumUnhealthyNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumUnhealthyNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumRebootedNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumRebootedNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsKilled": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsKilled",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AggregateContainersAllocated": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AggregateContainersAllocated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumLostNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumLostNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=ResourceManager,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/ReservedContainers": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).ReservedContainers",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/AppsSubmitted": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsSubmitted",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/ClusterMetrics/NumActiveNMs": {
-            "metric": "Hadoop:service=ResourceManager,name=ClusterMetrics.NumActiveNMs",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_300": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_300",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryMax":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/HeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryMax":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/NonHeapMemoryUsed":{
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=ResourceManager,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/yarn/Queue/$1.replaceAll(\",q(\\d+)=\",\"/\").substring(1)/running_60": {
-            "metric": "Hadoop:service=ResourceManager,name=QueueMetrics(.+).running_60",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=ResourceManager,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-  

<TRUNCATED>

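Each key in these metrics.json fragments is an Ambari API metric path, and the value names the backing source metric: in the "jmx" provider sections the source is a JMX MBean attribute (pointInTime true, temporal false), while in the "ganglia" sections it is a Ganglia/RRD metric name that also supports temporal (time-range) queries. The $1.replaceAll(",q(\d+)=","/").substring(1) expressions in the queue paths rewrite the queue portion captured by the (.+) group of the source metric into slash-separated path segments (e.g. ",q0=root,q1=default" becomes "root/default"); the Ganglia sections use an analogous $1.replaceAll("([.])","/") rewrite for dotted queue names. The following is a minimal illustrative sketch, in Python, of how such a mapping could be read and a queue path resolved; it is not Ambari's own property-provider code (which is Java), and the function names and file path used are assumptions.

import json
import re

def load_metric_map(path, component, provider_type):
    # Collect metric-path -> definition entries for one component and one
    # provider type ("ganglia" or "jmx") from a metrics.json file.
    with open(path) as f:
        definition = json.load(f)
    mapping = {}
    for category in ("Component", "HostComponent"):
        for provider in definition.get(component, {}).get(category, []):
            if provider.get("type") == provider_type:
                mapping.update(provider.get("metrics", {}))
    return mapping

# The JMX-style queue expression exactly as it appears (JSON-decoded) in the keys above.
QUEUE_EXPR = r'$1.replaceAll(",q(\d+)=","/").substring(1)'

def resolve_queue_path(path_template, source_pattern, concrete_metric):
    # Fill the queue expression in an API path from a concrete MBean name, e.g. the
    # pattern "Hadoop:service=ResourceManager,name=QueueMetrics(.+).AppsRunning"
    # matched against
    # "Hadoop:service=ResourceManager,name=QueueMetrics,q0=root,q1=default.AppsRunning"
    # captures ",q0=root,q1=default", which becomes "root/default".
    match = re.match(source_pattern + "$", concrete_metric)
    if not match:
        return None
    queue = re.sub(r",q(\d+)=", "/", match.group(1))[1:]  # mirrors replaceAll + substring(1)
    return path_template.replace(QUEUE_EXPR, queue)

# Hypothetical usage (the file path is an assumption):
# ganglia_map = load_metric_map("metrics.json", "HBASE_REGIONSERVER", "ganglia")
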
[12/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json
new file mode 100644
index 0000000..37f73bf
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metrics.json
@@ -0,0 +1,13635 @@
+{
+  "HBASE_REGIONSERVER": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/hbase/regionserver/compactionTime_avg_time": {
+            "metric": "hbase.regionserver.compactionTime_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion_num_ops": {
+            "metric": "rpc.rpc.closeRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+            "metric": "regionserver.Server.mutationsWithoutWALSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/unassign_num_ops": {
+            "metric": "rpc.rpc.unassign_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyTable_num_ops": {
+            "metric": "rpc.rpc.modifyTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion_avg_time": {
+            "metric": "rpc.rpc.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore_num_ops": {
+            "metric": "rpc.rpc.getClosestRowBefore_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowAppendCount": {
+            "metric": "regionserver.Server.slowAppendCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow_num_ops": {
+            "metric": "rpc.rpc.lockRow_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion_avg_time": {
+            "metric": "rpc.rpc.flushRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stopMaster_num_ops": {
+            "metric": "rpc.rpc.stopMaster_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balance_avg_time": {
+            "metric": "rpc.rpc.balance_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyColumn_avg_time": {
+            "metric": "rpc.rpc.modifyColumn_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/rootIndexSizeKB": {
+            "metric": "hbase.regionserver.rootIndexSizeKB",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper_num_ops": {
+            "metric": "rpc.rpc.getZooKeeper_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheCount": {
+            "metric": "regionserver.Server.blockCacheCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion_num_ops": {
+            "metric": "rpc.rpc.flushRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_std_dev": {
+            "metric": "hbase.regionserver.putRequestLatency_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_std_dev": {
+            "metric": "hbase.regionserver.getRequestLatency_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/get_num_ops": {
+            "metric": "rpc.rpc.get_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stopMaster_avg_time": {
+            "metric": "rpc.rpc.stopMaster_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/ping_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo_avg_time": {
+            "metric": "rpc.rpc.getRegionInfo_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow_avg_time": {
+            "metric": "rpc.rpc.lockRow_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/commitPending_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME_num_ops": {
+            "metric": "rpc.rpc.checkOOME_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/reportRSFatalError_num_ops": {
+            "metric": "rpc.rpc.reportRSFatalError_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/reportRSFatalError_avg_time": {
+            "metric": "rpc.rpc.reportRSFatalError_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_min": {
+            "metric": "regionserver.Server.Delete_min",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClusterStatus_num_ops": {
+            "metric": "rpc.rpc.getClusterStatus_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHTableDescriptors_avg_time": {
+            "metric": "rpc.rpc.getHTableDescriptors_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.rpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteColumn_num_ops": {
+            "metric": "rpc.rpc.deleteColumn_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment_num_ops": {
+            "metric": "rpc.rpc.increment_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyColumn_num_ops": {
+            "metric": "rpc.rpc.modifyColumn_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME_avg_time": {
+            "metric": "rpc.rpc.checkOOME_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/next/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.next.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcSlowResponse_avg_time": {
+            "metric": "rpc.rpc.RpcSlowResponse_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration_avg_time": {
+            "metric": "rpc.rpc.getConfiguration_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unassign_avg_time": {
+            "metric": "rpc.rpc.unassign_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/canCommit_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+            "metric": "regionserver.Server.Delete_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion_avg_time": {
+            "metric": "rpc.rpc.compactRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/writeRequestsCount": {
+            "metric": "regionserver.Server.writeRequestCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor_num_ops": {
+            "metric": "rpc.rpc.execCoprocessor_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/canCommit_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_min": {
+            "metric": "regionserver.Server.Get_min",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue_avg_time": {
+            "metric": "rpc.rpc.incrementColumnValue_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteTable_num_ops": {
+            "metric": "rpc.rpc.deleteTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+            "metric": "regionserver.Server.Mutate_75th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheHitCount": {
+            "metric": "regionserver.Server.blockCacheHitCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/exists_avg_time": {
+            "metric": "rpc.rpc.exists_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowPutCount": {
+            "metric": "regionserver.Server.slowPutCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatency_num_ops": {
+            "metric": "hbase.regionserver.fsWriteLatency_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/exists/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete_num_ops": {
+            "metric": "rpc.rpc.delete_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/exists_num_ops": {
+            "metric": "rpc.rpc.exists_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerStartup_avg_time": {
+            "metric": "rpc.rpc.regionServerStartup_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete_num_ops": {
+            "metric": "rpc.rpc.checkAndDelete_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion_avg_time": {
+            "metric": "rpc.rpc.closeRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature_avg_time": {
+            "metric": "rpc.rpc.getProtocolSignature_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/assign_avg_time": {
+            "metric": "rpc.rpc.assign_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionSize_num_ops": {
+            "metric": "hbase.regionserver.compactionSize_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close_avg_time": {
+            "metric": "rpc.rpc.close_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheSize": {
+            "metric": "regionserver.Server.blockCacheSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+            "metric": "regionserver.Server.Mutate_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo_num_ops": {
+            "metric": "rpc.rpc.getHServerInfo_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop_avg_time": {
+            "metric": "rpc.rpc.stop_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped_num_ops": {
+            "metric": "rpc.rpc.isStopped_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_median": {
+            "metric": "regionserver.Server.Mutate_median",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_median": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_median",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isMasterRunning_avg_time": {
+            "metric": "rpc.rpc.isMasterRunning_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue_num_ops": {
+            "metric": "rpc.rpc.incrementColumnValue_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_std_dev": {
+            "metric": "hbase.regionserver.deleteRequestLatency_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/hdfsBlocksLocalityIndex": {
+            "metric": "hbase.regionserver.hdfsBlocksLocalityIndex",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/readRequestsCount": {
+            "metric": "regionserver.Server.readRequestCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_min": {
+            "metric": "regionserver.Server.Mutate_min",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/storefileIndexSizeMB": {
+            "metric": "regionserver.Server.storeFileIndexSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/assign_num_ops": {
+            "metric": "rpc.rpc.assign_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.close.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_median": {
+            "metric": "regionserver.Server.Delete_median",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/enableTable_avg_time": {
+            "metric": "rpc.rpc.enableTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_mean": {
+            "metric": "regionserver.Server.Mutate_mean",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close_num_ops": {
+            "metric": "rpc.rpc.close_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/done_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.done_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionSize_avg_time": {
+            "metric": "hbase.regionserver.compactionSize_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_min": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_min",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteTable_avg_time": {
+            "metric": "rpc.rpc.deleteTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.put.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/delete_avg_time": {
+            "metric": "rpc.rpc.delete_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/statusUpdate_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.rpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClusterStatus_avg_time": {
+            "metric": "rpc.rpc.getClusterStatus_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.put.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyTable_avg_time": {
+            "metric": "rpc.rpc.modifyTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut_avg_time": {
+            "metric": "rpc.rpc.checkAndPut_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put_avg_time": {
+            "metric": "rpc.rpc.put_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheHitRatio": {
+            "metric": "hbase.regionserver.blockCacheHitRatio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/createTable_avg_time": {
+            "metric": "rpc.rpc.createTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHTableDescriptors_num_ops": {
+            "metric": "rpc.rpc.getHTableDescriptors_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getAlterStatus_avg_time": {
+            "metric": "rpc.rpc.getAlterStatus_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getRegionInfo_num_ops": {
+            "metric": "rpc.rpc.getRegionInfo_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/statusUpdate_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion_num_ops": {
+            "metric": "rpc.rpc.compactRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted_num_ops": {
+            "metric": "rpc.rpc.isAborted_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_max": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheEvictedCount": {
+            "metric": "regionserver.Server.blockCacheEvictionCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/disableTable_num_ops": {
+            "metric": "rpc.rpc.disableTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner_num_ops": {
+            "metric": "rpc.rpc.openScanner_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerReport_num_ops": {
+            "metric": "rpc.rpc.regionServerReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions_avg_time": {
+            "metric": "rpc.rpc.openRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/exists/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+            "metric": "regionserver.Server.Mutate_99th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isMasterRunning_num_ops": {
+            "metric": "rpc.rpc.isMasterRunning_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balanceSwitch_num_ops": {
+            "metric": "rpc.rpc.balanceSwitch_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/offline_num_ops": {
+            "metric": "rpc.rpc.offline_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_max": {
+            "metric": "regionserver.Server.Get_max",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort_num_ops": {
+            "metric": "rpc.rpc.abort_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheHitCachingRatio": {
+            "metric": "hbase.regionserver.blockCacheHitCachingRatio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter_num_ops": {
+            "metric": "rpc.rpc.rollHLogWriter_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegions_num_ops": {
+            "metric": "rpc.rpc.openRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion_avg_time": {
+            "metric": "rpc.rpc.splitRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+            "metric": "regionserver.Server.Get_99th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_min": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_min",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_std_dev",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+            "metric": "regionserver.Server.Delete_99th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_max": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_max",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getTask_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries_num_ops": {
+            "metric": "rpc.rpc.replicateLogEntries_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi_avg_time": {
+            "metric": "rpc.rpc.multi_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowIncrementCount": {
+            "metric": "regionserver.Server.slowIncrementCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+            "metric": "regionserver.Server.Mutate_95th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionQueueSize": {
+            "metric": "regionserver.Server.compactionQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker_avg_time": {
+            "metric": "rpc.rpc.getCatalogTracker_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion_num_ops": {
+            "metric": "rpc.rpc.splitRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balance_num_ops": {
+            "metric": "rpc.rpc.balance_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushTime_num_ops": {
+            "metric": "hbase.regionserver.flushTime_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/shutdown_num_ops": {
+            "metric": "rpc.rpc.shutdown_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatency_num_ops": {
+            "metric": "hbase.regionserver.fsReadLatency_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+            "metric": "regionserver.Server.Get_75th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName_avg_time": {
+            "metric": "rpc.rpc.getServerName_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/compactionTime_num_ops": {
+            "metric": "hbase.regionserver.compactionTime_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort_avg_time": {
+            "metric": "rpc.rpc.abort_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/enableTable_num_ops": {
+            "metric": "rpc.rpc.enableTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/stores": {
+            "metric": "regionserver.Server.storeCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/addColumn_avg_time": {
+            "metric": "rpc.rpc.addColumn_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName_num_ops": {
+            "metric": "rpc.rpc.getServerName_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.rpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/disableTable_avg_time": {
+            "metric": "rpc.rpc.disableTable_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion_avg_time": {
+            "metric": "rpc.rpc.openRegion_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerReport_avg_time": {
+            "metric": "rpc.rpc.regionServerReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getAlterStatus_num_ops": {
+            "metric": "rpc.rpc.getAlterStatus_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/next_avg_time": {
+            "metric": "rpc.rpc.next_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+            "metric": "regionserver.Server.Get_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles_num_ops": {
+            "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/ping_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatency_avg_time": {
+            "metric": "hbase.regionserver.fsReadLatency_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushSize_num_ops": {
+            "metric": "hbase.regionserver.flushSize_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/balanceSwitch_avg_time": {
+            "metric": "rpc.rpc.balanceSwitch_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/putRequestLatency_max": {
+            "metric": "regionserver.Server.Mutate_max",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.callQueueLen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion_num_ops": {
+            "metric": "rpc.rpc.openRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsSyncLatency_num_ops": {
+            "metric": "hbase.regionserver.fsSyncLatency_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.getOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/move_num_ops": {
+            "metric": "rpc.rpc.move_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop_num_ops": {
+            "metric": "rpc.rpc.stop_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries_avg_time": {
+            "metric": "rpc.rpc.replicateLogEntries_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_mean": {
+            "metric": "regionserver.Server.Get_mean",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/get_avg_time": {
+            "metric": "rpc.rpc.get_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/multi_num_ops": {
+            "metric": "rpc.rpc.multi_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/next/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.next.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions_avg_time": {
+            "metric": "rpc.rpc.addToOnlineRegions_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/deleteColumn_avg_time": {
+            "metric": "rpc.rpc.deleteColumn_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/regions": {
+            "metric": "regionserver.Server.regionCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/bulkLoadHFiles_avg_time": {
+            "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/stop/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.addToOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/abort/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheFree": {
+            "metric": "regionserver.Server.blockCacheFreeSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/offline_avg_time": {
+            "metric": "rpc.rpc.offline_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow_avg_time": {
+            "metric": "rpc.rpc.unlockRow_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/blockCacheMissCount": {
+            "metric": "regionserver.Server.blockCacheMissCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/getCatalogTracker_num_ops": {
+            "metric": "rpc.rpc.getCatalogTracker_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushQueueSize": {
+            "metric": "regionserver.Server.flushQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/close/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.close.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/execCoprocessor_avg_time": {
+            "metric": "rpc.rpc.execCoprocessor_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_mean",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/createTable_num_ops": {
+            "metric": "rpc.rpc.createTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getConfiguration_num_ops": {
+            "metric": "rpc.rpc.getConfiguration_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isStopped_avg_time": {
+            "metric": "rpc.rpc.isStopped_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rollHLogWriter_avg_time": {
+            "metric": "rpc.rpc.rollHLogWriter_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsSyncLatency_avg_time": {
+            "metric": "hbase.regionserver.fsSyncLatency_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+            "metric": "regionserver.Server.Delete_mean",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_mean": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_mean",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+            "metric": "regionserver.Server.staticIndexSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getFromOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+            "metric": "regionserver.Server.mutationsWithoutWALCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/get/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.get.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_median": {
+            "metric": "regionserver.Server.Get_median",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/openScanner_avg_time": {
+            "metric": "rpc.rpc.openScanner_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcSlowResponse_num_ops": {
+            "metric": "rpc.rpc.RpcSlowResponse_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/isAborted_avg_time": {
+            "metric": "rpc.rpc.isAborted_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushSize_avg_time": {
+            "metric": "hbase.regionserver.flushSize_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/commitPending_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getClosestRowBefore_avg_time": {
+            "metric": "rpc.rpc.getClosestRowBefore_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_max": {
+            "metric": "regionserver.Server.Delete_max",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/get/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.get.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/put_num_ops": {
+            "metric": "rpc.rpc.put_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/move_avg_time": {
+            "metric": "rpc.rpc.move_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/percentFilesLocal": {
+            "metric": "regionserver.Server.percentFilesLocal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatency_avg_time": {
+            "metric": "hbase.regionserver.fsWriteLatency_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getTask_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/addColumn_num_ops": {
+            "metric": "rpc.rpc.addColumn_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/maxMemoryM": {
+            "metric": "jvm.metrics.maxMemoryM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getOnlineRegions_num_ops": {
+            "metric": "rpc.rpc.getOnlineRegions_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/flushTime_avg_time": {
+            "metric": "hbase.regionserver.flushTime_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/done_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.done_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion_num_ops": {
+            "metric": "rpc.rpc.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/unlockRow_num_ops": {
+            "metric": "rpc.rpc.unlockRow_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowGetCount": {
+            "metric": "regionserver.Server.slowGetCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/shutdown_avg_time": {
+            "metric": "rpc.rpc.shutdown_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/regionServerStartup_num_ops": {
+            "metric": "rpc.rpc.regionServerStartup_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/requests": {
+            "metric": "regionserver.Server.totalRequestCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile": {
+            "metric": "hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/storefiles": {
+            "metric": "regionserver.Server.storeFileCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/next_num_ops": {
+            "metric": "rpc.rpc.next_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
+            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/slowDeleteCount": {
+            "metric": "regionserver.Server.slowDeleteCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndDelete_avg_time": {
+            "metric": "rpc.rpc.checkAndDelete_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getHServerInfo_avg_time": {
+            "metric": "rpc.rpc.getHServerInfo_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getZooKeeper_avg_time": {
+            "metric": "rpc.rpc.getZooKeeper_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/hlogFileCount": {
+            "metric": "hbase.regionserver.hlogFileCount",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+            "metric": "regionserver.Server.Get_95th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+            "metric": "regionserver.Server.Delete_95th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/memstoreSizeMB": {
+            "metric": "regionserver.Server.memStoreSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/fsReadLatencyHistogram_median": {
+            "metric": "hbase.regionserver.fsReadLatencyHistogram_median",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolSignature_num_ops": {
+            "metric": "rpc.rpc.getProtocolSignature_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
+            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+            "metric": "regionserver.Server.Delete_75th_percentile",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.rpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+            "metric": "regionserver.Server.staticBloomSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/checkAndPut_num_ops": {
+            "metric": "rpc.rpc.checkAndPut_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/increment_avg_time": {
+            "metric": "rpc.rpc.increment_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/hbase/regionserver/slowPutCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/percentFilesLocal": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_min": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheFree": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheMissCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/flushQueueSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowAppendCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowIncrementCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheEvictedCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/compactionQueueSize": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_median": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowGetCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/readRequestsCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_min": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/storefileIndexSizeMB": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_median": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_max": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_mean": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/requests": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/storefiles": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/writeRequestsCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_median": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/slowDeleteCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/stores": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_min": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/memstoreSizeMB": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_max": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_mean": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/deleteRequestLatency_max": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/regions": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/blockCacheHitCount": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
+            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/hbase/regionserver/compactionTime_avg_time": {
+            "metric": "hbase.regionserver.compactionTime_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/closeRegion_num_ops": {
+            "metric": "rpc.rpc.closeRegion_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
+            "metric": "regionserver.Server.mutationsWithoutWALSize",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/unassign_num_ops": {
+            "metric": "rpc.rpc.unassign_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/modifyTable_num_ops": {
+            "metric": "rpc.rpc.modifyTable_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
+            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
+            "pointInTime": tru

<TRUNCATED>

[11/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh
new file mode 100644
index 0000000..39fe6e5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/files/hbaseSmokeVerify.sh
@@ -0,0 +1,32 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+conf_dir=$1
+data=$2
+echo "scan 'ambarismoketest'" | hbase --config $conf_dir shell > /tmp/hbase_chk_verify
+cat /tmp/hbase_chk_verify
+echo "Looking for $data"
+grep -q $data /tmp/hbase_chk_verify
+if [ "$?" -ne 0 ]
+then
+  exit 1
+fi
+
+grep -q '1 row(s)' /tmp/hbase_chk_verify
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py
new file mode 100644
index 0000000..80b49e6
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/functions.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+import re
+import math
+import datetime
+
+from resource_management.core.shell import checked_call
+
+def calc_xmn_from_xms(heapsize_str, xmn_percent, xmn_max):
+  """
+  @param heapsize_str: str (e.g '1000m')
+  @param xmn_percent: float (e.g 0.2)
+  @param xmn_max: integer (e.g 512)
+  """
+  heapsize = int(re.search('\d+',heapsize_str).group(0))
+  heapsize_unit = re.search('\D+',heapsize_str).group(0)
+  xmn_val = int(math.floor(heapsize*xmn_percent))
+  xmn_val -= xmn_val % 8
+  
+  result_xmn_val = xmn_max if xmn_val > xmn_max else xmn_val
+  return str(result_xmn_val) + heapsize_unit
+
+def get_unique_id_and_date():
+    code, out = checked_call("hostid")
+    id = out.strip()
+    
+    now = datetime.datetime.now()
+    date = now.strftime("%M%d%y")
+
+    return "id{id}_date{date}".format(id=id, date=date)
+  
+def get_kinit_path(pathes_list):
+  """
+  @param pathes_list: list of directories to search for a kinit executable
+  """
+  kinit_path = ""
+  
+  for x in pathes_list:
+    if not x:
+      continue
+    
+    path = os.path.join(x,"kinit")
+
+    if os.path.isfile(path):
+      kinit_path = path
+      break
+    
+  return kinit_path
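
For reference (not part of the patch), a quick sanity check of calc_xmn_from_xms above, with the expected -Xmn strings worked out from the function body; the import assumes this scripts directory is on sys.path, as it is when the package scripts run:

    from functions import calc_xmn_from_xms

    # 20% of the regionserver heap, rounded down to a multiple of 8, capped at xmn_max
    assert calc_xmn_from_xms('1024m', 0.2, 512) == '200m'   # floor(204.8) = 204 -> 204 - 204 % 8 = 200
    assert calc_xmn_from_xms('8192m', 0.2, 512) == '512m'   # 1632 exceeds the cap, so xmn_max wins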

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py
new file mode 100644
index 0000000..bd33463
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+def hbase(type=None # 'master' or 'regionserver' or 'client'
+              ):
+  import params
+  
+  Directory( params.conf_dir,
+      owner = params.hbase_user,
+      group = params.user_group,
+      recursive = True
+  )
+  
+  XmlConfig( "hbase-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hbase-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+
+  XmlConfig( "hdfs-site.xml",
+            conf_dir = params.conf_dir,
+            configurations = params.config['configurations']['hdfs-site'],
+            owner = params.hbase_user,
+            group = params.user_group
+  )
+  
+  if 'hbase-policy' in params.config['configurations']:
+    XmlConfig( "hbase-policy.xml",
+      configurations = params.config['configurations']['hbase-policy'],
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  # Manually overriding ownership of file installed by hadoop package
+  else: 
+    File( format("{conf_dir}/hbase-policy.xml"),
+      owner = params.hbase_user,
+      group = params.user_group
+    )
+  
+  hbase_TemplateConfig( 'hbase-env.sh')     
+       
+  hbase_TemplateConfig( params.metric_prop_file_name,
+    tag = 'GANGLIA-MASTER' if type == 'master' else 'GANGLIA-RS'
+  )
+
+  hbase_TemplateConfig( 'regionservers')
+
+  if params.security_enabled:
+    hbase_TemplateConfig( format("hbase_{type}_jaas.conf"))
+  
+  if type != "client":
+    Directory( params.pid_dir,
+      owner = params.hbase_user,
+      recursive = True
+    )
+  
+    Directory ( [params.tmp_dir, params.log_dir],
+      owner = params.hbase_user,
+      recursive = True
+    )    
+
+def hbase_TemplateConfig(name, 
+                         tag=None
+                         ):
+  import params
+
+  TemplateConfig( format("{conf_dir}/{name}"),
+      owner = params.hbase_user,
+      template_tag = tag
+  )
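
For reference (not part of the patch): the GANGLIA tag passed above is expected to select one of the two metrics templates added later in this commit. The helper below only illustrates that naming, assuming TemplateConfig appends "-<tag>.j2" to the template name, which is consistent with the template file names in this patch:

    metric_prop_file_name = "hadoop-metrics2-hbase.properties"   # value from params.py

    def expected_template(name, tag=None):
        # illustrative only; mirrors the assumed TemplateConfig naming scheme
        return "%s-%s.j2" % (name, tag) if tag else "%s.j2" % name

    assert expected_template(metric_prop_file_name, 'GANGLIA-MASTER') == "hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2"
    assert expected_template('regionservers') == "regionservers.j2"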

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py
new file mode 100644
index 0000000..0f2a1bc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+
+         
+class HbaseClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    
+    hbase(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/'
+  stdoutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stdoutfile]
+  
+  HbaseClient().execute()
+  
+if __name__ == "__main__":
+  HbaseClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py
new file mode 100644
index 0000000..d94b4b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_master.py
@@ -0,0 +1,74 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseMaster(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='master')
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'master',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'master',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-master.pid")
+    check_process_status(pid_file)
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/HBASE/package'
+  stroutputf = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutputf]
+  
+  HbaseMaster().execute()
+  
+if __name__ == "__main__":
+  HbaseMaster().execute()
+  #main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py
new file mode 100644
index 0000000..2d91e75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_regionserver.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from hbase import hbase
+from hbase_service import hbase_service
+
+         
+class HbaseRegionServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    hbase(type='regionserver')
+      
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # for security
+
+    hbase_service( 'regionserver',
+      action = 'start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    hbase_service( 'regionserver',
+      action = 'stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/hbase-hbase-regionserver.pid")
+    check_process_status(pid_file)
+    
+  def decommission(self, env):
+    print "Decommission not yet implemented!"
+    
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "stop"
+  print "Running "+command_type
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseRegionServer().execute()
+  
+if __name__ == "__main__":
+  HbaseRegionServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py
new file mode 100644
index 0000000..7a1248b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/hbase_service.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+def hbase_service(
+  name,
+  action = 'start'): # 'start' or 'stop' or 'status'
+    
+    import params
+  
+    role = name
+    cmd = format("{daemon_script} --config {conf_dir}")
+    pid_file = format("{pid_dir}/hbase-hbase-{role}.pid")
+    
+    daemon_cmd = None
+    no_op_test = None
+    
+    if action == 'start':
+      daemon_cmd = format("{cmd} start {role}")
+      no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    elif action == 'stop':
+      daemon_cmd = format("{cmd} stop {role} && rm -f {pid_file}")
+
+    if daemon_cmd is not None:
+      Execute ( daemon_cmd,
+        not_if = no_op_test,
+        user = params.hbase_user
+      )
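
For reference (not part of the patch), the command this composes for a master start, with daemon_script and conf_dir taken from params.py:

    daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"   # from params.py
    conf_dir = "/etc/hbase/conf"                           # from params.py

    daemon_cmd = "%s --config %s start master" % (daemon_script, conf_dir)
    # -> /usr/lib/hbase/bin/hbase-daemon.sh --config /etc/hbase/conf start master
    # skipped when the pid file exists and the process is alive (the not_if test above)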

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py
new file mode 100644
index 0000000..674b2d9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/params.py
@@ -0,0 +1,84 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hbase/conf"
+daemon_script = "/usr/lib/hbase/bin/hbase-daemon.sh"
+
+hbase_user = config['configurations']['global']['hbase_user']
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+user_group = config['configurations']['global']['user_group']
+
+# this is "hadoop-metrics.properties" for 1.x stacks
+metric_prop_file_name = "hadoop-metrics2-hbase.properties"
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+
+log_dir = config['configurations']['global']['hbase_log_dir']
+master_heapsize = config['configurations']['global']['hbase_master_heapsize']
+
+regionserver_heapsize = config['configurations']['global']['hbase_regionserver_heapsize']
+regionserver_xmn_size = functions.calc_xmn_from_xms(regionserver_heapsize, 0.2, 512)
+
+pid_dir = status_params.pid_dir
+tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+
+client_jaas_config_file = default('hbase_client_jaas_config_file', format("{conf_dir}/hbase_client_jaas.conf"))
+master_jaas_config_file = default('hbase_master_jaas_config_file', format("{conf_dir}/hbase_master_jaas.conf"))
+regionserver_jaas_config_file = default('hbase_regionserver_jaas_config_file', format("{conf_dir}/hbase_regionserver_jaas.conf"))
+
+ganglia_server_hosts = default('/clusterHostInfo/ganglia_server_host', []) # is not passed when ganglia is not present
+ganglia_server_host = '' if len(ganglia_server_hosts) == 0 else ganglia_server_hosts[0]
+
+rs_hosts = default('hbase_rs_hosts', config['clusterHostInfo']['slave_hosts']) #if hbase_rs_hosts not given it is assumed that region servers on same nodes as slaves
+
+smoke_test_user = config['configurations']['global']['smokeuser']
+smokeuser_permissions = default('smokeuser_permissions', "RWXCA")
+service_check_data = functions.get_unique_id_and_date()
+
+if security_enabled:
+  
+  _use_hostname_in_principal = default('instance_name', True)
+  _master_primary_name = config['configurations']['global']['hbase_master_primary_name']
+  _hostname = config['hostname']
+  _kerberos_domain = config['configurations']['global']['kerberos_domain']
+  _master_principal_name = config['configurations']['global']['hbase_master_principal_name']
+  _regionserver_primary_name = config['configurations']['global']['hbase_regionserver_primary_name']
+  
+  if _use_hostname_in_principal:
+    master_jaas_princ = format("{_master_primary_name}/{_hostname}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}/{_hostname}@{_kerberos_domain}")
+  else:
+    master_jaas_princ = format("{_master_principal_name}@{_kerberos_domain}")
+    regionserver_jaas_princ = format("{_regionserver_primary_name}@{_kerberos_domain}")
+    
+master_keytab_path = config['configurations']['hbase-site']['hbase.master.keytab.file']
+regionserver_keytab_path = config['configurations']['hbase-site']['hbase.regionserver.keytab.file']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+hbase_user_keytab = config['configurations']['global']['hbase_user_keytab']
+kinit_path_local = functions.get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py
new file mode 100644
index 0000000..ff6d0ed
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import functions
+
+
+class HbaseServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    output_file = "/apps/hbase/data/ambarismoketest"
+    test_cmd = format("fs -test -e {output_file}")
+    kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smoke_test_user};") if params.security_enabled else ""
+    hbase_servicecheck_file = '/tmp/hbase-smoke.sh'
+  
+    File( '/tmp/hbaseSmokeVerify.sh',
+      content = StaticFile("hbaseSmokeVerify.sh"),
+      mode = 0755
+    )
+  
+    File( hbase_servicecheck_file,
+      mode = 0755,
+      content = Template('hbase-smoke.sh.j2')
+    )
+    
+    if params.security_enabled:    
+      hbase_grant_premissions_file = '/tmp/hbase_grant_permissions.sh'
+      hbase_kinit_cmd = format("{kinit_path_local} -kt {hbase_user_keytab} {hbase_user};")
+      grantprivelegecmd = format("{hbase_kinit_cmd} hbase shell {hbase_grant_premissions_file}")
+  
+      File( hbase_grant_premissions_file,
+        owner   = params.hbase_user,
+        group   = params.user_group,
+        mode    = 0644,
+        content = Template('hbase_grant_permissions.j2')
+      )
+      
+      Execute( grantprivelegecmd,
+        user = params.hbase_user,
+      )
+
+    servicecheckcmd = format("{kinit_cmd} hbase --config {conf_dir} shell {hbase_servicecheck_file}")
+    smokeverifycmd = format("{kinit_cmd} /tmp/hbaseSmokeVerify.sh {conf_dir} {service_check_data}")
+  
+    Execute( servicecheckcmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+  
+    Execute ( smokeverifycmd,
+      tries     = 3,
+      try_sleep = 5,
+      user = params.smoke_test_user,
+      logoutput = True
+    )
+    
+def main():
+  import sys
+  command_type = 'perform'
+  command_data_file = '/root/workspace/HBase/input.json'
+  basedir = '/root/workspace/HBase/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  HbaseServiceCheck().execute()
+  
+if __name__ == "__main__":
+  HbaseServiceCheck().execute()
+  
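
For reference (not part of the patch), the two commands the service check issues on an unsecured cluster (kinit_cmd is empty), with conf_dir from params.py and a made-up service_check_data token:

    conf_dir = "/etc/hbase/conf"                 # from params.py
    service_check_data = "id12345_date010114"    # example token only

    servicecheckcmd = "hbase --config %s shell /tmp/hbase-smoke.sh" % conf_dir
    smokeverifycmd = "/tmp/hbaseSmokeVerify.sh %s %s" % (conf_dir, service_check_data)
    # the verify script scans 'ambarismoketest' and greps for the token and '1 row(s)'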

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py
new file mode 100644
index 0000000..c9b20ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['hbase_pid_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
new file mode 100644
index 0000000..2583f44
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-MASTER.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8663
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8663
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8663

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
new file mode 100644
index 0000000..9f2b616
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hadoop-metrics2-hbase.properties-GANGLIA-RS.j2
@@ -0,0 +1,62 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# See http://wiki.apache.org/hadoop/GangliaMetrics
+#
+# Make sure you know whether you are using ganglia 3.0 or 3.1.
+# If 3.1, you will have to patch your hadoop instance with HADOOP-4675
+# And, yes, this file is named hadoop-metrics.properties rather than
+# hbase-metrics.properties because we're leveraging the hadoop metrics
+# package and hadoop-metrics.properties is a hardcoded name, at least
+# for the moment.
+#
+# See also http://hadoop.apache.org/hbase/docs/current/metrics.html
+
+# HBase-specific configuration to reset long-running stats (e.g. compactions)
+# If this variable is left out, then the default is no expiration.
+hbase.extendedperiod = 3600
+
+# Configuration of the "hbase" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+hbase.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+hbase.period=10
+hbase.servers={{ganglia_server_host}}:8660
+
+# Configuration of the "jvm" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+jvm.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+jvm.period=10
+jvm.servers={{ganglia_server_host}}:8660
+
+# Configuration of the "rpc" context for ganglia
+# Pick one: Ganglia 3.0 (former) or Ganglia 3.1 (latter)
+# rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext
+rpc.class=org.apache.hadoop.metrics.ganglia.GangliaContext31
+rpc.period=10
+rpc.servers={{ganglia_server_host}}:8660
+
+#Ganglia following hadoop example
+hbase.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+hbase.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+hbase.sink.ganglia.servers={{ganglia_server_host}}:8660

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2
new file mode 100644
index 0000000..b8505b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-env.sh.j2
@@ -0,0 +1,82 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# Set environment variables here.
+
+# The java implementation to use. Java 1.6 required.
+export JAVA_HOME={{java64_home}}
+
+# HBase Configuration directory
+export HBASE_CONF_DIR=${HBASE_CONF_DIR:-{{conf_dir}}}
+
+# Extra Java CLASSPATH elements. Optional.
+export HBASE_CLASSPATH=${HBASE_CLASSPATH}
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+# export HBASE_HEAPSIZE=1000
+
+# Extra Java runtime options.
+# Below are what we set by default. May only work with SUN JVM.
+# For more on why as well as other possible settings,
+# see http://wiki.apache.org/hadoop/PerformanceTuning
+export HBASE_OPTS="-XX:+UseConcMarkSweepGC -XX:ErrorFile={{log_dir}}/hs_err_pid%p.log"
+export SERVER_GC_OPTS="-verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:{{log_dir}}/gc.log-`date +'%Y%m%d%H%M'`"
+# Uncomment below to enable java garbage collection logging.
+# export HBASE_OPTS="$HBASE_OPTS -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCDateStamps -Xloggc:$HBASE_HOME/logs/gc-hbase.log"
+
+# Uncomment and adjust to enable JMX exporting
+# See jmxremote.password and jmxremote.access in $JRE_HOME/lib/management to configure remote password access.
+# More details at: http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
+#
+# export HBASE_JMX_BASE="-Dcom.sun.management.jmxremote.ssl=false -Dcom.sun.management.jmxremote.authenticate=false"
+export HBASE_MASTER_OPTS="-Xmx{{master_heapsize}}"
+export HBASE_REGIONSERVER_OPTS="-Xmn{{regionserver_xmn_size}} -XX:CMSInitiatingOccupancyFraction=70  -Xms{{regionserver_heapsize}} -Xmx{{regionserver_heapsize}}"
+# export HBASE_THRIFT_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10103"
+# export HBASE_ZOOKEEPER_OPTS="$HBASE_JMX_BASE -Dcom.sun.management.jmxremote.port=10104"
+
+# File naming hosts on which HRegionServers will run. $HBASE_HOME/conf/regionservers by default.
+export HBASE_REGIONSERVERS=${HBASE_CONF_DIR}/regionservers
+
+# Extra ssh options. Empty by default.
+# export HBASE_SSH_OPTS="-o ConnectTimeout=1 -o SendEnv=HBASE_CONF_DIR"
+
+# Where log files are stored. $HBASE_HOME/logs by default.
+export HBASE_LOG_DIR={{log_dir}}
+
+# A string representing this instance of hbase. $USER by default.
+# export HBASE_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes. See 'man nice'.
+# export HBASE_NICENESS=10
+
+# The directory where pid files are stored. /tmp by default.
+export HBASE_PID_DIR={{pid_dir}}
+
+# Seconds to sleep between slave commands. Unset by default. This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HBASE_SLAVE_SLEEP=0.1
+
+# Tell HBase whether it should manage its own instance of Zookeeper or not.
+export HBASE_MANAGES_ZK=false
+
+{% if security_enabled %}
+export HBASE_OPTS="$HBASE_OPTS -Djava.security.auth.login.config={{client_jaas_config_file}}"
+export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -Djava.security.auth.login.config={{master_jaas_config_file}}"
+export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -Djava.security.auth.login.config={{regionserver_jaas_config_file}}"
+{% endif %}
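
For reference (not part of the patch), how the regionserver JVM line above renders with example values (a 1024m heap, for which calc_xmn_from_xms would yield '200m'); the values are illustrative only:

    regionserver_heapsize = "1024m"     # example value from global config
    regionserver_xmn_size = "200m"      # calc_xmn_from_xms("1024m", 0.2, 512)

    opts = ("-Xmn%s -XX:CMSInitiatingOccupancyFraction=70  -Xms%s -Xmx%s"
            % (regionserver_xmn_size, regionserver_heapsize, regionserver_heapsize))
    # -> -Xmn200m -XX:CMSInitiatingOccupancyFraction=70  -Xms1024m -Xmx1024m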

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2
new file mode 100644
index 0000000..61fe62f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase-smoke.sh.j2
@@ -0,0 +1,26 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+disable 'ambarismoketest'
+drop 'ambarismoketest'
+create 'ambarismoketest','family'
+put 'ambarismoketest','row01','family:col01','{{service_check_data}}'
+scan 'ambarismoketest'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2
new file mode 100644
index 0000000..696718e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_client_jaas.conf.j2
@@ -0,0 +1,5 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2
new file mode 100644
index 0000000..9102d35
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_grant_permissions.j2
@@ -0,0 +1,21 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+grant '{{smoke_test_user}}', '{{smokeuser_permissions}}'
+exit
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2
new file mode 100644
index 0000000..722cfcc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_master_jaas.conf.j2
@@ -0,0 +1,8 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{master_keytab_path}}"
+principal="{{master_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
new file mode 100644
index 0000000..cb9b7b0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/hbase_regionserver_jaas.conf.j2
@@ -0,0 +1,8 @@
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{regionserver_keytab_path}}"
+principal="{{regionserver_jaas_princ}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2
new file mode 100644
index 0000000..b22ae5f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/package/templates/regionservers.j2
@@ -0,0 +1,2 @@
+{% for host in rs_hosts %}{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
new file mode 100644
index 0000000..e244fc7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/core-site.xml
@@ -0,0 +1,167 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+ <!--
+    Licensed to the Apache Software Foundation (ASF) under one or more
+    contributor license agreements.  See the NOTICE file distributed with
+    this work for additional information regarding copyright ownership.
+    The ASF licenses this file to You under the Apache License, Version 2.0
+    (the "License"); you may not use this file except in compliance with
+    the License.  You may obtain a copy of the License at
+ 
+        http://www.apache.org/licenses/LICENSE-2.0
+ 
+    Unless required by applicable law or agreed to in writing, software
+    distributed under the License is distributed on an "AS IS" BASIS,
+    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+    See the License for the specific language governing permissions and
+    limitations under the License.
+ -->
+ 
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration xmlns:xi="http://www.w3.org/2001/XInclude">
+
+<!-- i/o properties -->
+
+  <property>
+    <name>io.file.buffer.size</name>
+    <value>131072</value>
+    <description>The size of buffer for use in sequence files.
+  The size of this buffer should probably be a multiple of hardware
+  page size (4096 on Intel x86), and it determines how much data is
+  buffered during read and write operations.</description>
+  </property>
+
+  <property>
+    <name>io.serializations</name>
+    <value>org.apache.hadoop.io.serializer.WritableSerialization</value>
+  </property>
+
+  <property>
+    <name>io.compression.codecs</name>
+    <value>org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.DefaultCodec</value>
+    <description>A list of the compression codec classes that can be used
+                 for compression/decompression.</description>
+  </property>
+
+<!-- file system properties -->
+
+  <property>
+    <name>fs.defaultFS</name>
+    <!-- cluster variant -->
+    <value>hdfs://localhost:8020</value>
+    <description>The name of the default file system.  Either the
+  literal string "local" or a host:port for NDFS.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>fs.trash.interval</name>
+    <value>360</value>
+    <description>Number of minutes between trash checkpoints.
+  If zero, the trash feature is disabled.
+  </description>
+  </property>
+
+  <!-- ipc properties: copied from kryptonite configuration -->
+  <property>
+    <name>ipc.client.idlethreshold</name>
+    <value>8000</value>
+    <description>Defines the threshold number of connections after which
+               connections will be inspected for idleness.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connection.maxidletime</name>
+    <value>30000</value>
+    <description>The maximum time after which a client will bring down the
+               connection to the server.
+  </description>
+  </property>
+
+  <property>
+    <name>ipc.client.connect.max.retries</name>
+    <value>50</value>
+    <description>Defines the maximum number of retries for IPC connections.</description>
+  </property>
+
+  <!-- Web Interface Configuration -->
+  <property>
+    <name>mapreduce.jobtracker.webinterface.trusted</name>
+    <value>false</value>
+    <description> If set to true, the web interfaces of JT and NN may contain
+                actions, such as kill job, delete file, etc., that should
+                not be exposed to public. Enable this option if the interfaces
+                are only reachable by those who have the right authorization.
+  </description>
+  </property>
+
+ <property>
+   <name>hadoop.security.authentication</name>
+   <value>simple</value>
+   <description>
+   Set the authentication for the cluster. Valid values are: simple or
+   kerberos.
+   </description>
+ </property>
+<property>
+  <name>hadoop.security.authorization</name>
+  <value>false</value>
+  <description>
+     Enable authorization for different protocols.
+  </description>
+</property>
+
+  <property>
+    <name>hadoop.security.auth_to_local</name>
+    <value>
+        RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/
+        RULE:[2:$1@$0](jhs@.*)s/.*/mapred/
+        RULE:[2:$1@$0]([nd]n@.*)s/.*/hdfs/
+        RULE:[2:$1@$0](hm@.*)s/.*/hbase/
+        RULE:[2:$1@$0](rs@.*)s/.*/hbase/
+        DEFAULT
+    </value>
+<description>The mapping from kerberos principal names to local OS user names.
+  The default rule is just "DEFAULT", which maps every principal in your default domain to its first component.
+  For example, "omalley@APACHE.ORG" and "omalley/admin@APACHE.ORG" both become "omalley" if your default domain is APACHE.ORG.
+The translation rules have 3 sections:
+      base     filter    substitution
+The base consists of a number, which is the number of components in the principal name (excluding the realm), and a pattern for building the name from the sections of the principal name. The base uses $0 to mean the realm, $1 to mean the first component and $2 to mean the second component.
+
+[1:$1@$0] translates "omalley@APACHE.ORG" to "omalley@APACHE.ORG"
+[2:$1] translates "omalley/admin@APACHE.ORG" to "omalley"
+[2:$1%$2] translates "omalley/admin@APACHE.ORG" to "omalley%admin"
+
+The filter is a regex in parens that must match the generated string for the rule to apply.
+
+"(.*%admin)" will take any string that ends in "%admin"
+"(.*@ACME.COM)" will take any string that ends in "@ACME.COM"
+
+Finally, the substitution is a sed rule to translate a regex into a fixed string.
+
+"s/@ACME\.COM//" removes the first instance of "@ACME.COM".
+"s/@[A-Z]*\.COM//" removes the first instance of "@" followed by a name followed by ".COM".
+"s/X/Y/g" replaces all of the "X" in the name with "Y"
+
+So, if your default realm was APACHE.ORG, but you also wanted to take all principals from ACME.COM that had a single component "joe@ACME.COM", you'd do:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+To also translate the names with a second component, you'd make the rules:
+
+RULE:[1:$1@$0](.*@ACME.COM)s/@.*//
+RULE:[2:$1@$0](.*@ACME.COM)s/@.*//
+DEFAULT
+
+If you want to treat all principals from APACHE.ORG with /admin as "admin", your rules would look like:
+
+RULE:[2:$1%$2@$0](.*%admin@APACHE.ORG)s/.*/admin/
+DEFAULT
+    </description>
+  </property>
+
+</configuration>
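
As a rough illustration of the auth_to_local rules above (not Hadoop's actual implementation), the rule RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ takes a two-component principal such as rm/host1@EXAMPLE.COM, builds "rm@EXAMPLE.COM" from $1@$0, checks it against the filter [rn]m@.*, and then applies the sed-style substitution, yielding the local user "yarn". A small Python sketch of that single rule:

import re

def apply_example_rule(principal):
    # Sketch of RULE:[2:$1@$0]([rn]m@.*)s/.*/yarn/ for two-component principals
    name, realm = principal.split("@")
    parts = name.split("/")
    if len(parts) != 2:
        return None                                       # base [2:...] matches 2 components only
    candidate = "%s@%s" % (parts[0], realm)                # build $1@$0
    if re.fullmatch(r"[rn]m@.*", candidate):               # filter must match the built string
        return re.sub(r".*", "yarn", candidate, count=1)   # substitution s/.*/yarn/
    return None

print(apply_example_rule("rm/host1.example.com@EXAMPLE.COM"))  # -> yarn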

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml
new file mode 100644
index 0000000..49d66bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/global.xml
@@ -0,0 +1,192 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>namenode_host</name>
+    <value></value>
+    <description>NameNode Host.</description>
+  </property>
+  <property>
+    <name>dfs_namenode_name_dir</name>
+    <value>/hadoop/hdfs/namenode</value>
+    <description>NameNode Directories.</description>
+  </property>
+  <property>
+    <name>snamenode_host</name>
+    <value></value>
+    <description>Secondary NameNode.</description>
+  </property>
+  <property>
+    <name>dfs_namenode_checkpoint_dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Secondary NameNode checkpoint dir.</description>
+  </property>
+  <property>
+    <name>datanode_hosts</name>
+    <value></value>
+    <description>List of Datanode Hosts.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_data_dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Data directories for Data Nodes.</description>
+  </property>
+  <property>
+    <name>hdfs_log_dir_prefix</name>
+    <value>/var/log/hadoop</value>
+    <description>Hadoop Log Dir Prefix</description>
+  </property>
+  <property>
+    <name>hadoop_pid_dir_prefix</name>
+    <value>/var/run/hadoop</value>
+    <description>Hadoop PID Dir Prefix</description>
+  </property>
+  <property>
+    <name>dfs_webhdfs_enabled</name>
+    <value>true</value>
+    <description>WebHDFS enabled</description>
+  </property>
+  <property>
+    <name>hadoop_heapsize</name>
+    <value>1024</value>
+    <description>Hadoop maximum Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_heapsize</name>
+    <value>1024</value>
+    <description>NameNode Java heap size</description>
+  </property>
+  <property>
+    <name>namenode_opt_newsize</name>
+    <value>200</value>
+    <description>NameNode new generation size</description>
+  </property>
+  <property>
+    <name>namenode_opt_maxnewsize</name>
+    <value>200</value>
+    <description>NameNode maximum new generation size</description>
+  </property>
+  <property>
+    <name>datanode_du_reserved</name>
+    <value>1073741824</value>
+    <description>Reserved space for HDFS</description>
+  </property>
+  <property>
+    <name>dtnode_heapsize</name>
+    <value>1024</value>
+    <description>DataNode maximum Java heap size</description>
+  </property>
+  <property>
+    <name>dfs_datanode_failed_volume_tolerated</name>
+    <value>0</value>
+    <description>DataNode volumes failure toleration</description>
+  </property>
+  <property>
+    <name>dfs_namenode_checkpoint_period</name>
+    <value>21600</value>
+    <description>HDFS Maximum Checkpoint Delay</description>
+  </property>
+  <property>
+    <name>fs_checkpoint_size</name>
+    <value>0.5</value>
+    <description>FS Checkpoint Size.</description>
+  </property>
+  <property>
+    <name>proxyuser_group</name>
+    <value>users</value>
+    <description>Proxy user group.</description>
+  </property>
+  <property>
+    <name>dfs_exclude</name>
+    <value></value>
+    <description>HDFS Exclude hosts.</description>
+  </property>
+  <property>
+    <name>dfs_replication</name>
+    <value>3</value>
+    <description>Default Block Replication.</description>
+  </property>
+  <property>
+    <name>dfs_block_local_path_access_user</name>
+    <value>hbase</value>
+    <description>User allowed to perform block local path access (used by HBase for short-circuit reads).</description>
+  </property>
+  <property>
+    <name>dfs_datanode_address</name>
+    <value>50010</value>
+    <description>Port for datanode address.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_http_address</name>
+    <value>50075</value>
+    <description>Port for datanode HTTP address.</description>
+  </property>
+  <property>
+    <name>dfs_datanode_data_dir_perm</name>
+    <value>750</value>
+    <description>Datanode dir perms.</description>
+  </property>
+
+  <property>
+    <name>security_enabled</name>
+    <value>false</value>
+    <description>Hadoop Security</description>
+  </property>
+  <property>
+    <name>kerberos_domain</name>
+    <value>EXAMPLE.COM</value>
+    <description>Kerberos realm.</description>
+  </property>
+  <property>
+    <name>kadmin_pw</name>
+    <value></value>
+    <description>Kerberos realm admin password</description>
+  </property>
+  <property>
+    <name>keytab_path</name>
+    <value>/etc/security/keytabs</value>
+    <description>Kerberos keytab path.</description>
+  </property>
+  
+    <property>
+    <name>namenode_formatted_mark_dir</name>
+    <value>/var/run/hadoop/hdfs/namenode/formatted/</value>
+    <description>Formatted Mark Directory.</description>
+  </property>
+    <property>
+    <name>hdfs_user</name>
+    <value>hdfs</value>
+    <description>User and Groups.</description>
+  </property>
+  <property>
+    <name>lzo_enabled</name>
+    <value>true</value>
+    <description>LZO compression enabled</description>
+  </property>
+  
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml
new file mode 100644
index 0000000..51b01bb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hadoop-policy.xml
@@ -0,0 +1,134 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientProtocol, which is used by user code
+    via the DistributedFileSystem.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.client.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for ClientDatanodeProtocol, the client-to-datanode protocol
+    for block recovery.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for DatanodeProtocol, which is used by datanodes to
+    communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.datanode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterDatanodeProtocol, the inter-datanode protocol
+    for updating generation timestamp.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.namenode.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for NamenodeProtocol, the protocol used by the secondary
+    namenode to communicate with the namenode.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.inter.tracker.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for InterTrackerProtocol, used by the tasktrackers to
+    communicate with the jobtracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for JobSubmissionProtocol, used by job clients to
+    communicate with the jobtracker for job submission, querying job status etc.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.job.task.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for TaskUmbilicalProtocol, used by the map and reduce
+    tasks to communicate with the parent tasktracker.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+ <property>
+    <name>security.admin.operations.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for AdminOperationsProtocol. Used for admin commands.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.refresh.usertogroups.mappings.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshUserMappingsProtocol. Used to refresh
+    users mappings. The ACL is a comma-separated list of user and
+    group names. The user and group list is separated by a blank. For
+    e.g. "alice,bob users,wheel".  A special value of "*" means all
+    users are allowed.</description>
+  </property>
+
+<property>
+    <name>security.refresh.policy.protocol.acl</name>
+    <value>hadoop</value>
+    <description>ACL for RefreshAuthorizationPolicyProtocol, used by the
+    dfsadmin and mradmin commands to refresh the security policy in-effect.
+    The ACL is a comma-separated list of user and group names. The user and
+    group list is separated by a blank. For e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+
+</configuration>
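
Every ACL value above shares the same two-part format: a comma-separated user list and a comma-separated group list, separated by a single blank, with "*" meaning all users. A small, hypothetical Python sketch of splitting such a value into users and groups (parse_acl is not a Hadoop API, just an illustration):

def parse_acl(value):
    # "alice,bob users,wheel" -> (['alice', 'bob'], ['users', 'wheel'])
    # "*" means everyone; a leading blank (e.g. " hdfs") means "no users, only groups"
    if value.strip() == "*":
        return "*"
    users_part, _, groups_part = value.partition(" ")
    users = [u for u in users_part.split(",") if u]
    groups = [g for g in groups_part.split(",") if g]
    return users, groups

print(parse_acl("alice,bob users,wheel"))  # (['alice', 'bob'], ['users', 'wheel'])
print(parse_acl("hadoop"))                 # (['hadoop'], [])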

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
new file mode 100644
index 0000000..7e8bfba
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/configuration/hdfs-site.xml
@@ -0,0 +1,513 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <!-- file system properties -->
+
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <!-- cluster variant -->
+    <value>/hadoop/hdfs/namenode</value>
+    <description>Determines where on the local filesystem the DFS name node
+      should store the name table.  If this is a comma-delimited list
+      of directories then the name table is replicated in all of the
+      directories, for redundancy. </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.support.append</name>
+    <value>true</value>
+    <description>to enable dfs append</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+    <description>Whether to enable WebHDFS feature</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.failed.volumes.tolerated</name>
+    <value>0</value>
+    <description> Number of failed disks a DataNode would tolerate before it stops offering service</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>/hadoop/hdfs/data</value>
+    <description>Determines where on the local filesystem a DFS data node
+      should store its blocks.  If this is a comma-delimited
+      list of directories, then data will be stored in all named
+      directories, typically on different devices.
+      Directories that do not exist are ignored.
+    </description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.hosts.exclude</name>
+    <value>/etc/hadoop/conf/dfs.exclude</value>
+    <description>Names a file that contains a list of hosts that are
+      not permitted to connect to the namenode.  The full pathname of the
+      file must be specified.  If the value is empty, no hosts are
+      excluded.</description>
+  </property>
+
+  <!--
+    <property>
+      <name>dfs.hosts</name>
+      <value>/etc/hadoop/conf/dfs.include</value>
+      <description>Names a file that contains a list of hosts that are
+      permitted to connect to the namenode. The full pathname of the file
+      must be specified.  If the value is empty, all hosts are
+      permitted.</description>
+    </property>
+  -->
+
+  <property>
+    <name>dfs.namenode.checkpoint.dir</name>
+    <value>/hadoop/hdfs/namesecondary</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary images to merge.
+      If this is a comma-delimited list of directories then the image is
+      replicated in all of the directories for redundancy.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.checkpoint.edits.dir</name>
+    <value>${dfs.namenode.checkpoint.dir}</value>
+    <description>Determines where on the local filesystem the DFS secondary
+      name node should store the temporary edits to merge.
+      If this is a comma-delimited list of directories then the edits are
+      replicated in all of the directories for redundancy.
+      Default value is same as dfs.namenode.checkpoint.dir
+    </description>
+  </property>
+
+
+  <property>
+    <name>dfs.namenode.checkpoint.period</name>
+    <value>21600</value>
+    <description>The number of seconds between two periodic checkpoints.
+    </description>
+  </property>
+
+  <property>
+    <name>fs.checkpoint.size</name>
+    <value>67108864</value>
+    <description>The size of the current edit log (in bytes) that triggers
+      a periodic checkpoint even if the maximum checkpoint delay is not reached
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication.max</name>
+    <value>50</value>
+    <description>Maximal block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>3</value>
+    <description>Default block replication.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.heartbeat.interval</name>
+    <value>3</value>
+    <description>Determines datanode heartbeat interval in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.safemode.threshold-pct</name>
+    <value>1.0f</value>
+    <description>
+      Specifies the percentage of blocks that should satisfy
+      the minimal replication requirement defined by dfs.namenode.replication.min.
+      Values less than or equal to 0 mean not to start in safe mode.
+      Values greater than 1 will make safe mode permanent.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.balance.bandwidthPerSec</name>
+    <value>6250000</value>
+    <description>
+      Specifies the maximum amount of bandwidth that each datanode
+      can utilize for the balancing purpose in term of
+      the number of bytes per second.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.https.port</name>
+    <value>50470</value>
+    <description>
+      This property is used by HftpFileSystem.
+    </description>
+  </property>
+
+  <property>
+    <name>ambari.dfs.datanode.port</name>
+    <value>50010</value>
+    <description>
+      The datanode port for data transfer. This property is effective only if referenced from dfs.datanode.address property.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:${ambari.dfs.datanode.port}</value>
+    <description>
+      The datanode server address and port for data transfer.
+    </description>
+  </property>
+
+  <property>
+    <name>ambari.dfs.datanode.http.port</name>
+    <value>50075</value>
+    <description>
+      The datanode http port. This property is effective only if referenced from dfs.datanode.http.address property.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:${ambari.dfs.datanode.http.port}</value>
+    <description>
+      The datanode http server address and port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blocksize</name>
+    <value>134217728</value>
+    <description>The default block size for new files.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.http-address</name>
+    <value>localhost:50070</value>
+    <description>The address and the base port on which the dfs NameNode
+      web UI will listen.</description>
+    <final>true</final>
+  </property>
+
+  <property>
+    <name>dfs.datanode.du.reserved</name>
+    <!-- cluster variant -->
+    <value>1073741824</value>
+    <description>Reserved space in bytes per volume. Always leave this much space free for non dfs use.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:8010</value>
+    <description>
+      The datanode ipc server address and port.
+      If the port is 0 then the server will start on a free port.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.blockreport.initialDelay</name>
+    <value>120</value>
+    <description>Delay for first block report in seconds.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>40</value>
+    <description>The number of server threads for the namenode.</description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.max.transfer.threads</name>
+    <value>1024</value>
+    <description>PRIVATE CONFIG VARIABLE</description>
+  </property>
+
+  <!-- Permissions configuration -->
+
+  <property>
+    <name>fs.permissions.umask-mode</name>
+    <value>022</value>
+    <description>
+      The octal umask used when creating files and directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.enabled</name>
+    <value>true</value>
+    <description>
+      If "true", enable permission checking in HDFS.
+      If "false", permission checking is turned off,
+      but all other behavior is unchanged.
+      Switching from one parameter value to the other does not change the mode,
+      owner or group of files or directories.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.permissions.superusergroup</name>
+    <value>hdfs</value>
+    <description>The name of the group of super-users.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.handler.count</name>
+    <value>100</value>
+    <description>Added to grow Queue size so that more client connections are allowed</description>
+  </property>
+
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+    <description>
+      If "true", access tokens are used as capabilities for accessing datanodes.
+      If "false", no access tokens are checked on accessing datanodes.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the NameNode
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.principal</name>
+    <value></value>
+    <description>
+      Kerberos principal name for the secondary NameNode.
+    </description>
+  </property>
+
+
+  <!--
+    This is KRB DOMAIN specific. The FQDN of the namenode has to be mentioned.
+  -->
+  <property>
+    <name>dfs.namenode.kerberos.https.principal</name>
+    <value></value>
+    <description>The Kerberos principal for the host that the NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.kerberos.https.principal</name>
+    <value></value>
+    <description>The Kerberos principal for the host that the secondary NameNode runs on.</description>
+
+  </property>
+
+  <property>
+    <!-- cluster variant -->
+    <name>dfs.namenode.secondary.http-address</name>
+    <value>localhost:50090</value>
+    <description>Address of secondary namenode web server</description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value></value>
+    <description>
+      The HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+      The HTTP Kerberos principal MUST start with 'HTTP/' per Kerberos
+      HTTP SPNEGO specification.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value></value>
+    <description>
+      The Kerberos keytab file with the credentials for the
+      HTTP Kerberos principal used by Hadoop-Auth in the HTTP endpoint.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value></value>
+    <description>
+      The Kerberos principal that the DataNode runs as. "_HOST" is replaced by the real host name.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value></value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.secondary.namenode.keytab.file</name>
+    <value></value>
+    <description>
+      Combined keytab file containing the namenode service and host principals.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value></value>
+    <description>
+      The filename of the keytab file for the DataNode.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.https-address</name>
+    <value>localhost:50470</value>
+    <description>The https address where namenode binds</description>
+
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>750</value>
+    <description>The permissions that should be set on dfs.datanode.data.dir
+      directories. The datanode will not come up if the permissions are
+      different on existing dfs.datanode.data.dir directories. If the directories
+      don't exist, they will be created with this permission.</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.accesstime.precision</name>
+    <value>0</value>
+    <description>The access time for an HDFS file is precise up to this value.
+      The default value is 1 hour. Setting a value of 0 disables
+      access times for HDFS.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.cluster.administrators</name>
+    <value> hdfs</value>
+    <description>ACL for who all can view the default servlets in the HDFS</description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.avoid.read.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid reading from stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.avoid.write.stale.datanode</name>
+    <value>true</value>
+    <description>
+      Indicate whether or not to avoid writing to stale datanodes whose
+      heartbeat messages have not been received by the namenode for more than a
+      specified time interval.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.write.stale.datanode.ratio</name>
+    <value>1.0f</value>
+    <description>When the ratio of stale datanodes to total datanodes marked exceeds
+      this ratio, stop avoiding writing to stale nodes so as to prevent causing hotspots.
+    </description>
+  </property>
+  <property>
+    <name>dfs.namenode.stale.datanode.interval</name>
+    <value>30000</value>
+    <description>Datanode is stale after not getting a heartbeat in this interval in ms</description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.http-address</name>
+    <value>0.0.0.0:8480</value>
+    <description>The address and port the JournalNode web UI listens on.
+      If the port is 0 then the server will start on a free port. </description>
+  </property>
+
+  <property>
+    <name>dfs.journalnode.edits.dir</name>
+    <value>/grid/0/hdfs/journal</value>
+    <description>The path where the JournalNode daemon will store its local state. </description>
+  </property>
+
+  <!-- HDFS Short-Circuit Local Reads -->
+
+  <property>
+    <name>dfs.client.read.shortcircuit</name>
+    <value>true</value>
+    <description>
+      This configuration parameter turns on short-circuit local reads.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.skip.checksum</name>
+    <value></value>
+    <description>Enable/disable skipping the checksum check</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+  </property>
+
+  <property>
+    <name>dfs.client.read.shortcircuit.streams.cache.size</name>
+    <value>4096</value>
+    <description>
+      The DFSClient maintains a cache of recently opened file descriptors. This
+      parameter controls the size of that cache. Setting this higher will use
+      more file descriptors, but potentially provide better performance on
+      workloads involving lots of seeks.
+    </description>
+  </property>
+
+  <property>
+    <name>dfs.namenode.name.dir.restore</name>
+    <value>true</value>
+    <description>Set to true to enable NameNode to attempt recovering a previously failed dfs.namenode.name.dir.
+      When enabled, a recovery of any failed directory is attempted during checkpoint.</description>
+  </property>
+
+</configuration>
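
Note how dfs.datanode.address and dfs.datanode.http.address above reference the ambari.dfs.datanode.port values through Hadoop's ${...} property expansion, so changing the ambari.* port also moves the bind address. A minimal sketch of that expansion, assuming a plain dict of properties rather than Hadoop's Configuration class:

import re

props = {
    "ambari.dfs.datanode.port": "50010",
    "dfs.datanode.address": "0.0.0.0:${ambari.dfs.datanode.port}",
}

def resolve(name, props):
    # Expand ${other.property} references the way Hadoop's Configuration does
    value = props[name]
    return re.sub(r"\$\{([^}]+)\}", lambda m: resolve(m.group(1), props), value)

print(resolve("dfs.datanode.address", props))  # -> 0.0.0.0:50010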

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml
new file mode 100644
index 0000000..8149bc2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metainfo.xml
@@ -0,0 +1,152 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HDFS</name>
+      <comment>Apache Hadoop Distributed File System</comment>
+      <version>2.1.0.2.1.1</version>
+
+      <components>
+        <component>
+          <name>NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/namenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/namenode.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>DATANODE</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/datanode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SECONDARY_NAMENODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/snamenode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HDFS_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hdfs_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>JOURNALNODE</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/journalnode.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>ZKFC</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/zkfc_slave.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-libhdfs</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>hadoop-lzo-native</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>snappy-devel</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>ambari-log4j</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>core-site</config-type>
+        <config-type>global</config-type>
+        <config-type>hdfs-site</config-type>
+        <config-type>hadoop-policy</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
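
Each <commandScript> above points at a Python script under the service's package/scripts directory; the Ambari agent invokes it with a command name (install, start, stop, status, plus custom commands such as DECOMMISSION). A minimal, hypothetical skeleton of such a script, assuming Ambari's resource_management Script base class:

from resource_management import Script

class NameNode(Script):
  def install(self, env):
    self.install_packages(env)   # pulls the packages declared in metainfo.xml

  def start(self, env):
    pass  # e.g. run the hadoop-daemon.sh start namenode command

  def stop(self, env):
    pass

  def status(self, env):
    pass  # e.g. check the NameNode pid file

  def decommission(self, env):
    pass  # backs the DECOMMISSION custom command declared above

if __name__ == "__main__":
  NameNode().execute()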


[36/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..77e458f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..bb5795b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2
new file mode 100644
index 0000000..51e2bac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-env.sh.j2
@@ -0,0 +1,121 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+# this is different for HDP1 #
+# Path to jsvc required by secure HDP 2.0 datanode
+# export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
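
The two for-loops above simply collect any MySQL and Oracle JDBC connector jars found under /usr/share/java into HADOOP_CLASSPATH. The same logic, sketched in Python for clarity (assuming the same glob patterns; the template itself keeps the shell form):

import glob

java_jdbc_libs = ""
for pattern in ("/usr/share/java/*mysql*", "/usr/share/java/*ojdbc*"):
    for jar in glob.glob(pattern):
        java_jdbc_libs += ":" + jar

print(java_jdbc_libs)  # e.g. ":/usr/share/java/mysql-connector-java.jar"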

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..a6a66ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2
new file mode 100644
index 0000000..ca7baa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/hdfs.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile 32768
+{{hdfs_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2
new file mode 100644
index 0000000..cb7b12b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check-v2.j2
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
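
check_disks above flags any ext3 mount declared in /etc/fstab that is missing from /proc/mounts (except /mnt) or mounted read-only. A rough Python sketch of the same check, assuming the standard /etc/fstab and /proc/mounts formats:

def check_disks(fstab="/etc/fstab", mounts="/proc/mounts"):
    # ext3 mount points declared in fstab (field 2), mirroring the awk '$3~/ext3/' filter
    declared = [line.split()[1] for line in open(fstab)
                if len(line.split()) > 2 and "ext3" in line.split()[2]]
    mounted = {}
    for line in open(mounts):
        fields = line.split()
        if len(fields) > 3:
            mounted[fields[1]] = fields[3]      # mount point -> mount options
    problems = []
    for mnt in declared:
        if mnt not in mounted and mnt != "/mnt":
            problems.append(mnt + "(u)")        # declared but not mounted
        elif mnt in mounted and mounted[mnt].startswith("ro,"):
            problems.append(mnt + "(ro)")       # mounted read-only
    return "disks ok" if not problems else " ".join(problems)

print(check_disks())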

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..b84b336
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
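This variant adds check_taskcontroller (setuid task-controller permissions on secure clusters) and check_jetty, which polls the TaskTracker's JMX servlet and fails once shuffle_exceptions_caught exceeds 10. A hedged way to inspect the same counter by hand, with the stock TaskTracker HTTP port substituted for the templated one:

# Probe the TaskTracker shuffle metrics the same way check_jetty does (50060 is the usual default HTTP port).
curl -s -m 5 "http://$(hostname):50060/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" \
  | grep shuffle_exceptions_caught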

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/log4j.properties.j2
new file mode 100644
index 0000000..577ad04
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/log4j.properties.j2
@@ -0,0 +1,200 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Roll over at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Log file size and number of backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+{% if is_jtnode_master or is_rmnode_master %}
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+
+log4j.appender.JSA.File={{hdfs_log_dir_prefix}}/{{mapred_user}}/${hadoop.mapreduce.jobsummary.log.file}
+
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
+log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false
+{% endif %}
+
+{{rca_prefix}}ambari.jobhistory.database={{ambari_db_rca_url}}
+{{rca_prefix}}ambari.jobhistory.driver={{ambari_db_rca_driver}}
+{{rca_prefix}}ambari.jobhistory.user={{ambari_db_rca_username}}
+{{rca_prefix}}ambari.jobhistory.password={{ambari_db_rca_password}}
+{{rca_prefix}}ambari.jobhistory.logger=DEBUG,JHA
+
+{{rca_prefix}}log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
+{{rca_prefix}}log4j.appender.JHA.database=${ambari.jobhistory.database}
+{{rca_prefix}}log4j.appender.JHA.driver=${ambari.jobhistory.driver}
+{{rca_prefix}}log4j.appender.JHA.user=${ambari.jobhistory.user}
+{{rca_prefix}}log4j.appender.JHA.password=${ambari.jobhistory.password}
+
+{{rca_prefix}}log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
+{{rca_prefix}}log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true
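Everything in this template keys off hadoop.root.logger, hadoop.log.dir and related properties, so individual daemons and clients can redirect their logging without editing the file. A small illustration of the usual override route (the values are examples, not defaults set by this patch):

# Run a client command with DEBUG logging sent to the rolling-file appender instead of the console.
export HADOOP_OPTS="$HADOOP_OPTS -Dhadoop.root.logger=DEBUG,RFA -Dhadoop.log.dir=/tmp/hadoop-logs"
hadoop fs -ls /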

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/slaves.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2
new file mode 100644
index 0000000..3530444
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/snmpd.conf.j2
@@ -0,0 +1,48 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
+group   notConfigGroup v1           notConfigUser
+group   notConfigGroup v2c           notConfigUser
+view    systemview    included   .1
+access  notConfigGroup ""      any       noauth    exact  systemview none none
+
+syslocation Hadoop 
+syscontact HadoopMaster 
+dontLogTCPWrappersConnects yes
+
+###############################################################################
+# disk checks
+
+disk / 10000
+
+
+###############################################################################
+# load average checks
+#
+
+# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
+#
+# 1MAX:   If the 1 minute load average is above this limit at query
+#         time, the errorFlag will be set.
+# 5MAX:   Similar, but for 5 min average.
+# 15MAX:  Similar, but for 15 min average.
+
+# Check for loads:
+#load 12 14 14
+
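The access lines above expose the whole tree (view ".1") read-only over SNMP v1/v2c to the templated community and source, which is exactly what the (currently disabled) check_link probe in the health-check templates expects. A quick smoke test against a configured host, with placeholder values standing in for the template variables:

# Walk the interface table with the configured community string ("public" is a placeholder here).
snmpwalk -t 5 -Oe -Oq -Os -v 1 -c public localhost if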

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2
new file mode 100644
index 0000000..d01d37e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/hooks/before-START/templates/taskcontroller.cfg.j2
@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}
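On secure clusters this file is consumed by the setuid task-controller binary, and the check_taskcontroller probe above insists on mode 6050 owned by root:hadoop. A one-line sanity check (the binary path differs between layouts and is only an example):

# Confirm the permissions check_taskcontroller expects; expected output is 6050:root:hadoop.
stat -c '%a:%U:%G' /usr/lib/hadoop/bin/task-controller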

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh
new file mode 100644
index 0000000..e60eb31
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmetad.sh
@@ -0,0 +1,37 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# Before checking gmetad, check rrdcached.
+./checkRrdcached.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+if [ -n "${gmetadRunningPid}" ]
+then
+  echo "${GMETAD_BIN} running with PID ${gmetadRunningPid}";
+else
+  echo "Failed to find running ${GMETAD_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh
new file mode 100644
index 0000000..0cec8dc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkGmond.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function checkGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+    # Skip over (purported) Clusters that don't have their core conf file present.
+    if [ -e "${gmondCoreConfFileName}" ]
+    then 
+      gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+      if [ -n "${gmondRunningPid}" ]
+      then
+        echo "${GMOND_BIN} for cluster ${gmondClusterName} running with PID ${gmondRunningPid}";
+      else
+        echo "Failed to find running ${GMOND_BIN} for cluster ${gmondClusterName}";
+        exit 1;
+      fi
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so check
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        checkGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just check the one ${gmondClusterName} that was asked for.
+    checkGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh
new file mode 100644
index 0000000..d94db5d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/checkRrdcached.sh
@@ -0,0 +1,34 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+if [ -n "${rrdcachedRunningPid}" ]
+then
+  echo "${RRDCACHED_BIN} running with PID ${rrdcachedRunningPid}";
+else
+  echo "Failed to find running ${RRDCACHED_BIN}";
+  exit 1;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init
new file mode 100644
index 0000000..20b388e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetad.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmetad startup script
+# processname: hdp-gmetad
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMETAD_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmetad.sh
+HDP_GANLIA_GMETAD_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmetad.sh
+HDP_GANLIA_GMETAD_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmetad.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmetad..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMETAD_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmetad
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmetad..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMETAD_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmetad
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmetad..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMETAD_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMETAD_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL
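The init script only shells out to the start/stop/check helpers under /usr/libexec/hdp/ganglia and maintains the /var/lock/subsys marker, so once installed it behaves like any other SysV service. Typical usage, assuming it has been dropped in as /etc/init.d/hdp-gmetad (this patch does not show that install step):

# Register the service and drive it through the usual SysV commands.
chkconfig --add hdp-gmetad
service hdp-gmetad start
service hdp-gmetad status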

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh
new file mode 100644
index 0000000..e28610e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmetadLib.sh
@@ -0,0 +1,204 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMETAD_BIN=/usr/sbin/gmetad;
+GMETAD_CONF_FILE=${GANGLIA_CONF_DIR}/gmetad.conf;
+GMETAD_PID_FILE=${GANGLIA_RUNTIME_DIR}/gmetad.pid;
+
+function getGmetadLoggedPid()
+{
+    if [ -e "${GMETAD_PID_FILE}" ]
+    then
+        echo `cat ${GMETAD_PID_FILE}`;
+    fi
+}
+
+function getGmetadRunningPid()
+{
+    gmetadLoggedPid=`getGmetadLoggedPid`;
+
+    if [ -n "${gmetadLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmetadLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmetadConf()
+{
+    now=`date`;
+
+    cat <<END_OF_GMETAD_CONF_1
+#################### Generated by ${0} on ${now} ####################
+#
+#-------------------------------------------------------------------------------
+# Setting the debug_level to 1 will keep the daemon in the foreground and
+# show only error messages. Setting this value higher than 1 will make 
+# gmetad output debugging information and stay in the foreground.
+# default: 0
+# debug_level 10
+#
+#-------------------------------------------------------------------------------
+# What to monitor. The most important section of this file. 
+#
+# The data_source tag specifies either a cluster or a grid to
+# monitor. If we detect the source is a cluster, we will maintain a complete
+# set of RRD databases for it, which can be used to create historical 
+# graphs of the metrics. If the source is a grid (it comes from another gmetad),
+# we will only maintain summary RRDs for it.
+#
+# Format: 
+# data_source "my cluster" [polling interval] address1:port address2:port ...
+# 
+# The keyword 'data_source' must immediately be followed by a unique
+# string which identifies the source, then an optional polling interval in 
+# seconds. The source will be polled at this interval on average. 
+# If the polling interval is omitted, 15sec is assumed.
+#
+# If you choose to set the polling interval to something other than the default,
+# note that the web frontend determines a host as down if its TN value is less
+# than 4 * TMAX (20sec by default).  Therefore, if you set the polling interval
+# to something around or greater than 80sec, this will cause the frontend to
+# incorrectly display hosts as down even though they are not.
+#
+# A list of machines which service the data source follows, in the 
+# format ip:port, or name:port. If a port is not specified then 8649
+# (the default gmond port) is assumed.
+# default: There is no default value
+#
+# data_source "my cluster" 10 localhost  my.machine.edu:8649  1.2.3.5:8655
+# data_source "my grid" 50 1.3.4.7:8655 grid.org:8651 grid-backup.org:8651
+# data_source "another source" 1.3.4.7:8655  1.3.4.8
+END_OF_GMETAD_CONF_1
+
+    # Get info about all the configured Ganglia clusters.
+    getGangliaClusterInfo | while read gangliaClusterInfoLine
+    do
+        # From each, parse out ${gmondClusterName}, ${gmondMasterIP} and ${gmondPort}... 
+        read gmondClusterName gmondMasterIP gmondPort <<<`echo ${gangliaClusterInfoLine}`;
+        # ...and generate a corresponding data_source line for gmetad.conf. 
+        echo "data_source \"${gmondClusterName}\" ${gmondMasterIP}:${gmondPort}";
+    done
+
+    cat <<END_OF_GMETAD_CONF_2
+#
+# Round-Robin Archives
+# You can specify custom Round-Robin archives here (defaults are listed below)
+#
+# Old Default RRA: Keep 1 hour of metrics at 15 second resolution, 1 day at 6 minute resolution.
+# RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+#      "RRA:AVERAGE:0.5:5760:374"
+# New Default RRA
+# Keep 5856 data points at 15 second resolution assuming 15 second (default) polling. That's 1 day
+# Two weeks of data points at 1 minute resolution (average)
+#RRAs "RRA:AVERAGE:0.5:1:5856" "RRA:AVERAGE:0.5:4:20160" "RRA:AVERAGE:0.5:40:52704"
+# Retaining existing resolution
+RRAs "RRA:AVERAGE:0.5:1:244" "RRA:AVERAGE:0.5:24:244" "RRA:AVERAGE:0.5:168:244" "RRA:AVERAGE:0.5:672:244" \
+     "RRA:AVERAGE:0.5:5760:374"
+#
+#-------------------------------------------------------------------------------
+# Scalability mode. If on, we summarize over downstream grids, and respect
+# authority tags. If off, we take on 2.5.0-era behavior: we do not wrap our output
+# in <GRID></GRID> tags, we ignore all <GRID> tags we see, and always assume
+# we are the "authority" on data source feeds. This approach does not scale to
+# large groups of clusters, but is provided for backwards compatibility.
+# default: on
+# scalable off
+#
+#-------------------------------------------------------------------------------
+# The name of this Grid. All the data sources above will be wrapped in a GRID
+# tag with this name.
+# default: unspecified
+gridname "HDP_GRID"
+#
+#-------------------------------------------------------------------------------
+# The authority URL for this grid. Used by other gmetads to locate graphs
+# for our data sources. Generally points to a ganglia/
+# website on this machine.
+# default: "http://hostname/ganglia/",
+#   where hostname is the name of this machine, as defined by gethostname().
+# authority "http://mycluster.org/newprefix/"
+#
+#-------------------------------------------------------------------------------
+# List of machines this gmetad will share XML with. Localhost
+# is always trusted. 
+# default: There is no default value
+# trusted_hosts 127.0.0.1 169.229.50.165 my.gmetad.org
+#
+#-------------------------------------------------------------------------------
+# If you want any host which connects to the gmetad XML to receive
+# data, then set this value to "on"
+# default: off
+# all_trusted on
+#
+#-------------------------------------------------------------------------------
+# If you don't want gmetad to setuid then set this to off
+# default: on
+# setuid off
+#
+#-------------------------------------------------------------------------------
+# User gmetad will setuid to (defaults to "nobody")
+# default: "nobody"
+setuid_username "${GMETAD_USER}"
+#
+#-------------------------------------------------------------------------------
+# Umask to apply to created rrd files and grid directory structure
+# default: 0 (files are public)
+# umask 022
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer requests for XML
+# default: 8651
+# xml_port 8651
+#
+#-------------------------------------------------------------------------------
+# The port gmetad will answer queries for XML. This facility allows
+# simple subtree and summation views of the XML tree.
+# default: 8652
+# interactive_port 8652
+#
+#-------------------------------------------------------------------------------
+# The number of threads answering XML requests
+# default: 4
+# server_threads 10
+#
+#-------------------------------------------------------------------------------
+# Where gmetad stores its round-robin databases
+# default: "/var/lib/ganglia/rrds"
+# rrd_rootdir "/some/other/place"
+#
+#-------------------------------------------------------------------------------
+# In earlier versions of gmetad, hostnames were handled in a case
+# sensitive manner
+# If your hostname directories have been renamed to lower case,
+# set this option to 0 to disable backward compatibility.
+# From version 3.2, backwards compatibility will be disabled by default.
+# default: 1   (for gmetad < 3.2)
+# default: 0   (for gmetad >= 3.2)
+case_sensitive_hostnames 1
+END_OF_GMETAD_CONF_2
+}
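generateGmetadConf writes a complete gmetad.conf to stdout, emitting one data_source line per cluster reported by getGangliaClusterInfo and filling in the grid name, RRAs and setuid user. A sketch of how a start script would presumably regenerate the file (GMETAD_CONF_FILE comes from the constants defined at the top of this library):

# Regenerate gmetad.conf from the currently configured clusters.
source ./gmetadLib.sh
generateGmetadConf > ${GMETAD_CONF_FILE}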

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init
new file mode 100644
index 0000000..afb7026
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmond.init
@@ -0,0 +1,73 @@
+#!/bin/sh
+# chkconfig: 2345 70 40
+# description: hdp-gmond startup script
+# processname: hdp-gmond
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Remember to keep this in-sync with the definition of 
+# GANGLIA_RUNTIME_COMPONENTS_UNPACK_DIR in monrpmInstaller.sh.
+HDP_GANGLIA_RUNTIME_COMPONENTS_DIR=/usr/libexec/hdp/ganglia
+HDP_GANLIA_GMOND_STARTER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/startGmond.sh
+HDP_GANLIA_GMOND_STOPPER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/stopGmond.sh
+HDP_GANLIA_GMOND_CHECKER=${HDP_GANGLIA_RUNTIME_COMPONENTS_DIR}/checkGmond.sh
+
+RETVAL=0
+
+case "$1" in
+   start)
+      echo "============================="
+      echo "Starting hdp-gmond..."
+      echo "============================="
+      [ -f ${HDP_GANLIA_GMOND_STARTER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STARTER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && touch /var/lock/subsys/hdp-gmond
+      ;;
+
+  stop)
+      echo "=================================="
+      echo "Shutting down hdp-gmond..."
+      echo "=================================="
+      [ -f ${HDP_GANLIA_GMOND_STOPPER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_STOPPER}"
+      RETVAL=$?
+      echo
+      [ $RETVAL -eq 0 ] && rm -f /var/lock/subsys/hdp-gmond
+      ;;
+
+  restart|reload)
+   	$0 stop
+   	$0 start
+   	RETVAL=$?
+	;;
+  status)
+      echo "======================================="
+      echo "Checking status of hdp-gmond..."
+      echo "======================================="
+      [ -f ${HDP_GANLIA_GMOND_CHECKER} ] || exit 1
+      eval "${HDP_GANLIA_GMOND_CHECKER}"
+      RETVAL=$?
+      ;;
+  *)
+	echo "Usage: $0 {start|stop|restart|status}"
+	exit 1
+esac
+
+exit $RETVAL

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh
new file mode 100644
index 0000000..87da4dd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/gmondLib.sh
@@ -0,0 +1,545 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+GMOND_BIN=/usr/sbin/gmond;
+GMOND_CORE_CONF_FILE=gmond.core.conf;
+GMOND_MASTER_CONF_FILE=gmond.master.conf;
+GMOND_SLAVE_CONF_FILE=gmond.slave.conf;
+GMOND_PID_FILE=gmond.pid;
+
+# Functions.
+function getGmondCoreConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/${GMOND_CORE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/${GMOND_CORE_CONF_FILE}";
+    fi
+}
+
+function getGmondMasterConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_MASTER_CONF_FILE}";
+    fi
+}
+
+function getGmondSlaveConfFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_CONF_DIR}/${clusterName}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    else
+        echo "${GANGLIA_CONF_DIR}/conf.d/${GMOND_SLAVE_CONF_FILE}";
+    fi
+}
+
+function getGmondPidFileName()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # ${clusterName} is not empty. 
+        echo "${GANGLIA_RUNTIME_DIR}/${clusterName}/${GMOND_PID_FILE}";
+    else
+        echo "${GANGLIA_RUNTIME_DIR}/${GMOND_PID_FILE}";
+    fi
+}
+
+function getGmondLoggedPid()
+{
+    gmondPidFile=`getGmondPidFileName ${1}`;
+
+    if [ -e "${gmondPidFile}" ]
+    then
+        echo `cat ${gmondPidFile}`;
+    fi
+}
+
+function getGmondRunningPid()
+{
+    gmondLoggedPid=`getGmondLoggedPid ${1}`;
+
+    if [ -n "${gmondLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${gmondLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
+
+function generateGmondCoreConf()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_CORE_CONF
+#################### Generated by ${0} on ${now} ####################
+#
+/* This configuration is as close to 2.5.x default behavior as possible
+   The values closely match ./gmond/metric.h definitions in 2.5.x */
+globals {
+  daemonize = yes
+  setuid = yes
+  user = ${GMOND_USER}
+  debug_level = 0
+  max_udp_msg_len = 1472
+  mute = no
+  deaf = no 
+  allow_extra_data = yes
+  host_dmax = 0 /*secs */
+  host_tmax = 20 /*secs */
+  cleanup_threshold = 300 /*secs */
+  gexec = no
+  send_metadata_interval = 30 /*secs */
+}
+
+/*
+ * The cluster attributes specified will be used as part of the <CLUSTER>
+ * tag that will wrap all hosts collected by this instance.
+ */
+cluster {
+  name = "${gmondClusterName}"
+  owner = "unspecified"
+  latlong = "unspecified"
+  url = "unspecified"
+}
+
+/* The host section describes attributes of the host, like the location */
+host {
+  location = "unspecified"
+}
+
+/* You can specify as many tcp_accept_channels as you like to share
+ * an XML description of the state of the cluster.
+ *
+ * At the very least, every gmond must expose its XML state to 
+ * queriers from localhost.
+ */
+tcp_accept_channel {
+  bind = localhost
+  port = ${gmondPort}
+}
+
+/* Each metrics module that is referenced by gmond must be specified and
+   loaded. If the module has been statically linked with gmond, it does
+   not require a load path. However all dynamically loadable modules must
+   include a load path. */
+modules {
+  module {
+    name = "core_metrics"
+  }
+  module {
+    name = "cpu_module"
+    path = "modcpu.so"
+  }
+  module {
+    name = "disk_module"
+    path = "moddisk.so"
+  }
+  module {
+    name = "load_module"
+    path = "modload.so"
+  }
+  module {
+    name = "mem_module"
+    path = "modmem.so"
+  }
+  module {
+    name = "net_module"
+    path = "modnet.so"
+  }
+  module {
+    name = "proc_module"
+    path = "modproc.so"
+  }
+  module {
+    name = "sys_module"
+    path = "modsys.so"
+  }
+}
+
+/* The old internal 2.5.x metric array has been replaced by the following
+   collection_group directives.  What follows is the default behavior for
+   collecting and sending metrics that is as close to 2.5.x behavior as
+   possible. */
+
+/* This collection group will cause a heartbeat (or beacon) to be sent every
+   20 seconds.  In the heartbeat is the GMOND_STARTED data which expresses
+   the age of the running gmond. */
+collection_group {
+  collect_once = yes
+  time_threshold = 20
+  metric {
+    name = "heartbeat"
+  }
+}
+
+/* This collection group will send general info about this host total memory every
+   180 secs.
+   This information doesn't change between reboots and is only collected
+   once. This information needed for heatmap showing */
+ collection_group {
+   collect_once = yes
+   time_threshold = 180
+   metric {
+    name = "mem_total"
+    title = "Memory Total"
+   }
+ }
+
+/* This collection group will send general info about this host every
+   1200 secs.
+   This information doesn't change between reboots and is only collected
+   once. */
+collection_group {
+  collect_once = yes
+  time_threshold = 1200
+  metric {
+    name = "cpu_num"
+    title = "CPU Count"
+  }
+  metric {
+    name = "cpu_speed"
+    title = "CPU Speed"
+  }
+  /* Should this be here? Swap can be added/removed between reboots. */
+  metric {
+    name = "swap_total"
+    title = "Swap Space Total"
+  }
+  metric {
+    name = "boottime"
+    title = "Last Boot Time"
+  }
+  metric {
+    name = "machine_type"
+    title = "Machine Type"
+  }
+  metric {
+    name = "os_name"
+    title = "Operating System"
+  }
+  metric {
+    name = "os_release"
+    title = "Operating System Release"
+  }
+  metric {
+    name = "location"
+    title = "Location"
+  }
+}
+
+/* This collection group will send the status of gexecd for this host
+   every 300 secs.*/
+/* Unlike 2.5.x the default behavior is to report gexecd OFF. */
+collection_group {
+  collect_once = yes
+  time_threshold = 300
+  metric {
+    name = "gexec"
+    title = "Gexec Status"
+  }
+}
+
+/* This collection group will collect the CPU status info every 20 secs.
+   The time threshold is set to 90 seconds.  In honesty, this
+   time_threshold could be set significantly higher to reduce
+   unnecessary network chatter. */
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* CPU status */
+  metric {
+    name = "cpu_user"
+    value_threshold = "1.0"
+    title = "CPU User"
+  }
+  metric {
+    name = "cpu_system"
+    value_threshold = "1.0"
+    title = "CPU System"
+  }
+  metric {
+    name = "cpu_idle"
+    value_threshold = "5.0"
+    title = "CPU Idle"
+  }
+  metric {
+    name = "cpu_nice"
+    value_threshold = "1.0"
+    title = "CPU Nice"
+  }
+  metric {
+    name = "cpu_aidle"
+    value_threshold = "5.0"
+    title = "CPU aidle"
+  }
+  metric {
+    name = "cpu_wio"
+    value_threshold = "1.0"
+    title = "CPU wio"
+  }
+  /* The next two metrics are optional if you want more detail...
+     ... since they are accounted for in cpu_system.
+  metric {
+    name = "cpu_intr"
+    value_threshold = "1.0"
+    title = "CPU intr"
+  }
+  metric {
+    name = "cpu_sintr"
+    value_threshold = "1.0"
+    title = "CPU sintr"
+  }
+  */
+}
+
+collection_group {
+  collect_every = 20
+  time_threshold = 90
+  /* Load Averages */
+  metric {
+    name = "load_one"
+    value_threshold = "1.0"
+    title = "One Minute Load Average"
+  }
+  metric {
+    name = "load_five"
+    value_threshold = "1.0"
+    title = "Five Minute Load Average"
+  }
+  metric {
+    name = "load_fifteen"
+    value_threshold = "1.0"
+    title = "Fifteen Minute Load Average"
+  }
+}
+
+/* This group collects the number of running and total processes */
+collection_group {
+  collect_every = 80
+  time_threshold = 950
+  metric {
+    name = "proc_run"
+    value_threshold = "1.0"
+    title = "Total Running Processes"
+  }
+  metric {
+    name = "proc_total"
+    value_threshold = "1.0"
+    title = "Total Processes"
+  }
+}
+
+/* This collection group grabs the volatile memory metrics every 40 secs and
+   sends them at least every 180 secs.  This time_threshold can be increased
+   significantly to reduce unneeded network traffic. */
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "mem_free"
+    value_threshold = "1024.0"
+    title = "Free Memory"
+  }
+  metric {
+    name = "mem_shared"
+    value_threshold = "1024.0"
+    title = "Shared Memory"
+  }
+  metric {
+    name = "mem_buffers"
+    value_threshold = "1024.0"
+    title = "Memory Buffers"
+  }
+  metric {
+    name = "mem_cached"
+    value_threshold = "1024.0"
+    title = "Cached Memory"
+  }
+  metric {
+    name = "swap_free"
+    value_threshold = "1024.0"
+    title = "Free Swap Space"
+  }
+}
+
+collection_group {
+  collect_every = 40
+  time_threshold = 300
+  metric {
+    name = "bytes_out"
+    value_threshold = 4096
+    title = "Bytes Sent"
+  }
+  metric {
+    name = "bytes_in"
+    value_threshold = 4096
+    title = "Bytes Received"
+  }
+  metric {
+    name = "pkts_in"
+    value_threshold = 256
+    title = "Packets Received"
+  }
+  metric {
+    name = "pkts_out"
+    value_threshold = 256
+    title = "Packets Sent"
+  }
+}
+
+
+collection_group {
+  collect_every = 40
+  time_threshold = 180
+  metric {
+    name = "disk_free"
+    value_threshold = 1.0
+    title = "Disk Space Available"
+  }
+  metric {
+    name = "part_max_used"
+    value_threshold = 1.0
+    title = "Maximum Disk Space Used"
+  }
+  metric {
+    name = "disk_total"
+    value_threshold = 1.0
+    title = "Total Disk Space"
+  }
+}
+
+udp_recv_channel {
+    port = 0
+}
+
+
+include ("${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d/*.conf")
+END_OF_GMOND_CORE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondMasterConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_MASTER_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Masters only receive; they never send. */
+udp_recv_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+
+/* The gmond cluster master must additionally provide an XML 
+ * description of the cluster to the gmetad that will query it.
+ */
+tcp_accept_channel {
+  bind = ${gmondMasterIP}
+  port = ${gmondPort}
+}
+END_OF_GMOND_MASTER_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
+
+function generateGmondSlaveConf
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        read gmondClusterName gmondMasterIP gmondPort <<<`getGangliaClusterInfo ${clusterName}`;
+
+        # Check that all of ${gmondClusterName} and ${gmondMasterIP} and ${gmondPort} are populated.
+        if [ "x" != "x${gmondClusterName}" -a "x" != "x${gmondMasterIP}" -a "x" != "x${gmondPort}" ]
+        then
+            now=`date`;
+
+            cat << END_OF_GMOND_SLAVE_CONF
+#################### Generated by ${0} on ${now} ####################
+/* Slaves only send; they never receive. */
+udp_send_channel {
+  #bind_hostname = yes # Highly recommended, soon to be default.
+                       # This option tells gmond to use a source address
+                       # that resolves to the machine's hostname.  Without
+                       # this, the metrics may appear to come from any
+                       # interface and the DNS names associated with
+                       # those IPs will be used to create the RRDs.
+  host = ${gmondMasterIP}
+  port = ${gmondPort}
+  ttl = 1
+}
+END_OF_GMOND_SLAVE_CONF
+        else
+            return 2;
+        fi
+    else
+        return 1;
+    fi
+}
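The library pairs a generate* function with a get*ConfFileName helper for each role, so a caller renders the shared core config plus either the master (receive-only) or slave (send-only) overlay per cluster. A minimal sketch, assuming a cluster named "HDPSlaves" is already known to getGangliaClusterInfo:

# Render the core config and the slave overlay for one hypothetical cluster.
source ./gmondLib.sh
clusterName="HDPSlaves"
generateGmondCoreConf  ${clusterName} > $(getGmondCoreConfFileName  ${clusterName})
generateGmondSlaveConf ${clusterName} > $(getGmondSlaveConfFileName ${clusterName})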

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py
new file mode 100644
index 0000000..3fe6901
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrd.py
@@ -0,0 +1,213 @@
+#!/usr/bin/env python
+
+'''
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+'''
+
+import cgi
+import os
+import rrdtool
+import sys
+import time
+import re
+import urlparse
+
+# place this script in /var/www/cgi-bin of the Ganglia collector
+# requires 'yum install rrdtool-python' on the Ganglia collector
+
+
+def printMetric(clusterName, hostName, metricName, file, cf, start, end,
+                resolution, pointInTime):
+  if clusterName.endswith("rrds"):
+    clusterName = ""
+
+  args = [file, cf]
+
+  if start is not None:
+    args.extend(["-s", start])
+
+  if end is not None:
+    args.extend(["-e", end])
+
+  if resolution is not None:
+    args.extend(["-r", resolution])
+
+  rrdMetric = rrdtool.fetch(args)
+  # ds_name
+  sys.stdout.write(rrdMetric[1][0])
+  sys.stdout.write("\n")
+
+  sys.stdout.write(clusterName)
+  sys.stdout.write("\n")
+  sys.stdout.write(hostName)
+  sys.stdout.write("\n")
+  sys.stdout.write(metricName)
+  sys.stdout.write("\n")
+
+  # write time
+  sys.stdout.write(str(rrdMetric[0][0]))
+  sys.stdout.write("\n")
+  # write step
+  sys.stdout.write(str(rrdMetric[0][2]))
+  sys.stdout.write("\n")
+
+  if not pointInTime:
+    valueCount = 0
+    lastValue = None
+
+    for tuple in rrdMetric[2]:
+
+      thisValue = tuple[0]
+
+      if valueCount > 0 and thisValue == lastValue:
+        valueCount += 1
+      else:
+        if valueCount > 1:
+          sys.stdout.write("[~r]")
+          sys.stdout.write(str(valueCount))
+          sys.stdout.write("\n")
+
+        if thisValue is None:
+          sys.stdout.write("[~n]\n")
+        else:
+          sys.stdout.write(str(thisValue))
+          sys.stdout.write("\n")
+
+        valueCount = 1
+        lastValue = thisValue
+  else:
+    value = None
+    idx = -1
+    tuple = rrdMetric[2]
+    tupleLastIdx = len(tuple) * -1
+
+    while value is None and idx >= tupleLastIdx:
+      value = tuple[idx][0]
+      idx -= 1
+
+    if value is not None:
+      sys.stdout.write(str(value))
+      sys.stdout.write("\n")
+
+  sys.stdout.write("[~EOM]\n")
+  return
+
+
+def stripList(l):
+  return ([x.strip() for x in l])
+
+
+sys.stdout.write("Content-type: text/plain\n\n")
+
+# write start time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+requestMethod = os.environ['REQUEST_METHOD']
+
+if requestMethod == 'POST':
+  postData = sys.stdin.readline()
+  queryString = cgi.parse_qs(postData)
+  queryString = dict((k, v[0]) for k, v in queryString.items())
+elif requestMethod == 'GET':
+  queryString = dict(cgi.parse_qsl(os.environ['QUERY_STRING']))
+
+if "m" in queryString:
+  metricParts = queryString["m"].split(",")
+else:
+  metricParts = [""]
+metricParts = stripList(metricParts)
+
+hostParts = []
+if "h" in queryString:
+  hostParts = queryString["h"].split(",")
+hostParts = stripList(hostParts)
+
+if "c" in queryString:
+  clusterParts = queryString["c"].split(",")
+else:
+  clusterParts = [""]
+clusterParts = stripList(clusterParts)
+
+if "p" in queryString:
+  rrdPath = queryString["p"]
+else:
+  rrdPath = "/var/lib/ganglia/rrds/"
+
+start = None
+if "s" in queryString:
+  start = queryString["s"]
+
+end = None
+if "e" in queryString:
+  end = queryString["e"]
+
+resolution = None
+if "r" in queryString:
+  resolution = queryString["r"]
+
+if "cf" in queryString:
+  cf = queryString["cf"]
+else:
+  cf = "AVERAGE"
+
+if "pt" in queryString:
+  pointInTime = True
+else:
+  pointInTime = False
+
+
+def _walk(*args, **kwargs):
+  for root, dirs, files in os.walk(*args, **kwargs):
+    for dir in dirs:
+      qualified_dir = os.path.join(root, dir)
+      if os.path.islink(qualified_dir):
+        for x in os.walk(qualified_dir, **kwargs):
+          yield x
+    yield (root, dirs, files)
+
+
+for cluster in clusterParts:
+  for path, dirs, files in _walk(rrdPath + cluster):
+    pathParts = path.split("/")
+    # Process only paths that contain files. If no host parameter was passed, process
+    # all host folders and the summary info; otherwise process only that host's folder.
+    if len(files) > 0 and (len(hostParts) == 0 or pathParts[-1] in hostParts):
+      for metric in metricParts:
+        file = metric + ".rrd"
+        fileFullPath = os.path.join(path, file)
+        if os.path.exists(fileFullPath):
+          #Exact name of metric
+          printMetric(pathParts[-2], pathParts[-1], file[:-4],
+                      os.path.join(path, file), cf, start, end, resolution,
+                      pointInTime)
+        else:
+          #Regex as metric name
+          metricRegex = metric + '\.rrd$'
+          p = re.compile(metricRegex)
+          matchedFiles = filter(p.match, files)
+          for matchedFile in matchedFiles:
+            printMetric(pathParts[-2], pathParts[-1], matchedFile[:-4],
+                        os.path.join(path, matchedFile), cf, start, end,
+                        resolution, pointInTime)
+
+sys.stdout.write("[~EOF]\n")
+# write end time
+sys.stdout.write(str(time.mktime(time.gmtime())))
+sys.stdout.write("\n")
+
+sys.stdout.flush()
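
For reference, once this CGI is placed under /var/www/cgi-bin on the Ganglia collector it can be exercised with a plain HTTP request. The query parameters map directly onto the handling above: c = cluster, h = host, m = metric (exact name or regex), s/e = start/end, r = resolution, cf = consolidation function (defaults to AVERAGE), p = RRD path, and pt = point-in-time mode. The response frames values one per line, using "[~r]<count>" for repeated values, "[~n]" for missing samples, "[~EOM]" after each metric and "[~EOF]" before the closing timestamp. The invocations below are a hypothetical sketch; the collector, cluster and host names are placeholders.

    # Fetch the averaged cpu_user series for one host over an explicit window:
    curl 'http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=cpu_user&cf=AVERAGE&s=1390000000&e=1390003600'

    # With pt set, only the most recent non-null value of each metric is returned:
    curl 'http://ganglia.example.com/cgi-bin/rrd.py?c=HDPSlaves&h=host1.example.com&m=cpu_user&pt=true'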

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh
new file mode 100644
index 0000000..8b7c257
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/rrdcachedLib.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}
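
This library is intended to be sourced by the sibling start/stop scripts (startGmetad.sh below sources it to reach the rrdcached constants) rather than executed directly. A minimal, hypothetical consumer might look like the following; the echoed messages are illustrative, while the variable and function names come from the library itself:

    # Hypothetical caller, run from the same directory as the Ganglia scripts:
    cd `dirname ${0}`;
    source ./rrdcachedLib.sh;

    rrdcachedRunningPid=`getRrdcachedRunningPid`;

    if [ -n "${rrdcachedRunningPid}" ]
    then
        echo "${RRDCACHED_BIN} is running with PID ${rrdcachedRunningPid}";
    else
        echo "${RRDCACHED_BIN} is not running";
    fi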

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh
new file mode 100644
index 0000000..5145b9c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/setupGanglia.sh
@@ -0,0 +1,141 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+  -o <owner>              Owner
+  -g <group>              Group
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+owner='root';
+group='root';
+
+while getopts ":c:mto:g:" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    o)
+      owner=${OPTARG};
+      ;;
+    g)
+      group=${OPTARG};
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod a+w ${GANGLIA_RUNTIME_DIR};
+chown ${owner}:${group} ${GANGLIA_CONF_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi
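
Typical invocations follow directly from usage() and the getopts loop above. These are hypothetical examples; the cluster name, owner and group shown are placeholder values, not Ambari defaults:

    # Generate gmetad configuration:
    ./setupGanglia.sh -t -o root -g hadoop;

    # Generate master gmond configuration for one Ganglia cluster:
    ./setupGanglia.sh -c HDPSlaves -m -o root -g hadoop;

    # Generate slave gmond configuration for the same cluster:
    ./setupGanglia.sh -c HDPSlaves -o root -g hadoop;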

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh
new file mode 100644
index 0000000..ab5102d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmetad.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        for i in `seq 0 5`; do
+          gmetadRunningPid=`getGmetadRunningPid`;
+          if [ -n "${gmetadRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh
new file mode 100644
index 0000000..239b62e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/GANGLIA/package/files/startGmond.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+
+        for i in `seq 0 5`; do
+          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+          if [ -n "${gmondRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi
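
The entry point above supports two call styles; a hypothetical pair of invocations (the cluster name is a placeholder):

    # Start gmond for every cluster that has generated configuration:
    ./startGmond.sh;

    # Start gmond for a single named cluster only:
    ./startGmond.sh HDPSlaves;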


[20/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.cfg.j2
deleted file mode 100644
index acb2522..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.cfg.j2
+++ /dev/null
@@ -1,1349 +0,0 @@
-##############################################################################
-#
-# NAGIOS.CFG - Sample Main Config File for Nagios 3.2.3
-#
-# Read the documentation for more information on this configuration
-# file.  I've provided some comments here, but things may not be so
-# clear without further explanation.
-#
-# Last Modified: 12-14-2008
-#
-##############################################################################
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-# LOG FILE
-# This is the main log file where service and host events are logged
-# for historical purposes.  This should be the first option specified 
-# in the config file!!!
-
-log_file=/var/log/nagios/nagios.log
-
-
-
-# OBJECT CONFIGURATION FILE(S)
-# These are the object configuration files in which you define hosts,
-# host groups, contacts, contact groups, services, etc.
-# You can split your object definitions across several config files
-# if you wish (as shown below), or keep them all in a single config file.
-
-# You can specify individual object config files as shown below:
-cfg_file=/etc/nagios/objects/commands.cfg
-cfg_file=/etc/nagios/objects/contacts.cfg
-cfg_file=/etc/nagios/objects/timeperiods.cfg
-cfg_file=/etc/nagios/objects/templates.cfg
-
-# Definitions for monitoring the local (Linux) host
-#cfg_file=/etc/nagios/objects/localhost.cfg
-
-# Definitions for monitoring a Windows machine
-#cfg_file=/etc/nagios/objects/windows.cfg
-
-# Definitions for monitoring a router/switch
-#cfg_file=/etc/nagios/objects/switch.cfg
-
-# Definitions for monitoring a network printer
-#cfg_file=/etc/nagios/objects/printer.cfg
-
-# Definitions for hadoop servers
-cfg_file={{nagios_host_cfg}}
-cfg_file={{nagios_hostgroup_cfg}}
-cfg_file={{nagios_servicegroup_cfg}}
-cfg_file={{nagios_service_cfg}}
-cfg_file={{nagios_command_cfg}}
-
-
-# You can also tell Nagios to process all config files (with a .cfg
-# extension) in a particular directory by using the cfg_dir
-# directive as shown below:
-
-#cfg_dir=/etc/nagios/servers
-#cfg_dir=/etc/nagios/printers
-#cfg_dir=/etc/nagios/switches
-#cfg_dir=/etc/nagios/routers
-
-
-
-
-# OBJECT CACHE FILE
-# This option determines where object definitions are cached when
-# Nagios starts/restarts.  The CGIs read object definitions from 
-# this cache file (rather than looking at the object config files
-# directly) in order to prevent inconsistencies that can occur
-# when the config files are modified after Nagios starts.
-
-object_cache_file=/var/nagios/objects.cache
-
-
-
-# PRE-CACHED OBJECT FILE
-# This option determines the location of the precached object file.
-# If you run Nagios with the -p command line option, it will preprocess
-# your object configuration file(s) and write the cached config to this
-# file.  You can then start Nagios with the -u option to have it read
-# object definitions from this precached file, rather than the standard
-# object configuration files (see the cfg_file and cfg_dir options above).
-# Using a precached object file can speed up the time needed to (re)start 
-# the Nagios process if you've got a large and/or complex configuration.
-# Read the documentation section on optimizing Nagios to find out more
-# about how this feature works.
-
-precached_object_file=/var/nagios/objects.precache
-
-
-
-# RESOURCE FILE
-# This is an optional resource file that contains $USERx$ macro
-# definitions. Multiple resource files can be specified by using
-# multiple resource_file definitions.  The CGIs will not attempt to
-# read the contents of resource files, so information that is
-# considered to be sensitive (usernames, passwords, etc) can be
-# defined as macros in this file and restrictive permissions (600)
-# can be placed on this file.
-
-resource_file={{nagios_resource_cfg}}
-
-
-
-# STATUS FILE
-# This is where the current status of all monitored services and
-# hosts is stored.  Its contents are read and processed by the CGIs.
-# The contents of the status file are deleted every time Nagios
-#  restarts.
-
-status_file=/var/nagios/status.dat
-
-
-
-# STATUS FILE UPDATE INTERVAL
-# This option determines the frequency (in seconds) that
-# Nagios will periodically dump program, host, and 
-# service status data.
-
-status_update_interval=10
-
-
-
-# NAGIOS USER
-# This determines the effective user that Nagios should run as.  
-# You can either supply a username or a UID.
-
-nagios_user={{nagios_user}}
-
-
-
-# NAGIOS GROUP
-# This determines the effective group that Nagios should run as.  
-# You can either supply a group name or a GID.
-
-nagios_group={{nagios_group}}
-
-
-
-# EXTERNAL COMMAND OPTION
-# This option allows you to specify whether or not Nagios should check
-# for external commands (in the command file defined below).  By default
-# Nagios will *not* check for external commands, just to be on the
-# cautious side.  If you want to be able to use the CGI command interface
-# you will have to enable this.
-# Values: 0 = disable commands, 1 = enable commands
-
-check_external_commands=1
-
-
-
-# EXTERNAL COMMAND CHECK INTERVAL
-# This is the interval at which Nagios should check for external commands.
-# This value works off the interval_length you specify later.  If you leave
-# that at its default value of 60 (seconds), a value of 1 here will cause
-# Nagios to check for external commands every minute.  If you specify a
-# number followed by an "s" (i.e. 15s), this will be interpreted to mean
-# actual seconds rather than a multiple of the interval_length variable.
-# Note: In addition to reading the external command file at regularly 
-# scheduled intervals, Nagios will also check for external commands after
-# event handlers are executed.
-# NOTE: Setting this value to -1 causes Nagios to check the external
-# command file as often as possible.
-
-#command_check_interval=15s
-command_check_interval=-1
-
-
-
-# EXTERNAL COMMAND FILE
-# This is the file that Nagios checks for external command requests.
-# It is also where the command CGI will write commands that are submitted
-# by users, so it must be writeable by the user that the web server
-# is running as (usually 'nobody').  Permissions should be set at the 
-# directory level instead of on the file, as the file is deleted every
-# time its contents are processed.
-
-command_file=/var/nagios/rw/nagios.cmd
-
-
-
-# EXTERNAL COMMAND BUFFER SLOTS
-# This setting is used to tweak the number of items or "slots" that
-# the Nagios daemon should allocate to the buffer that holds incoming 
-# external commands before they are processed.  As external commands 
-# are processed by the daemon, they are removed from the buffer.  
-
-external_command_buffer_slots=4096
-
-
-
-# LOCK FILE
-# This is the lockfile that Nagios will use to store its PID number
-# in when it is running in daemon mode.
-
-lock_file={{nagios_pid_file}}
-
-
-
-# TEMP FILE
-# This is a temporary file that is used as scratch space when Nagios
-# updates the status log, cleans the comment file, etc.  This file
-# is created, used, and deleted throughout the time that Nagios is
-# running.
-
-temp_file=/var/nagios/nagios.tmp
-
-
-
-# TEMP PATH
-# This is path where Nagios can create temp files for service and
-# host check results, etc.
-
-temp_path=/tmp
-
-
-
-# EVENT BROKER OPTIONS
-# Controls what (if any) data gets sent to the event broker.
-# Values:  0      = Broker nothing
-#         -1      = Broker everything
-#         <other> = See documentation
-
-event_broker_options=-1
-
-
-
-# EVENT BROKER MODULE(S)
-# This directive is used to specify an event broker module that should
-# be loaded by Nagios at startup.  Use multiple directives if you want
-# to load more than one module.  Arguments that should be passed to
-# the module at startup are separated from the module path by a space.
-#
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-# WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING !!! WARNING
-#!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-#
-# Do NOT overwrite modules while they are being used by Nagios or Nagios
-# will crash in a fiery display of SEGFAULT glory.  This is a bug/limitation
-# either in dlopen(), the kernel, and/or the filesystem.  And maybe Nagios...
-#
-# The correct/safe way of updating a module is by using one of these methods:
-#    1. Shutdown Nagios, replace the module file, restart Nagios
-#    2. Delete the original module file, move the new module file into place, restart Nagios
-#
-# Example:
-#
-#   broker_module=<modulepath> [moduleargs]
-
-#broker_module=/somewhere/module1.o
-#broker_module=/somewhere/module2.o arg1 arg2=3 debug=0
-
-
-
-# LOG ROTATION METHOD
-# This is the log rotation method that Nagios should use to rotate
-# the main log file. Values are as follows..
-#	n	= None - don't rotate the log
-#	h	= Hourly rotation (top of the hour)
-#	d	= Daily rotation (midnight every day)
-#	w	= Weekly rotation (midnight on Saturday evening)
-#	m	= Monthly rotation (midnight last day of month)
-
-log_rotation_method=d
-
-
-
-# LOG ARCHIVE PATH
-# This is the directory where archived (rotated) log files should be 
-# placed (assuming you've chosen to do log rotation).
-
-log_archive_path=/var/log/nagios/archives
-
-
-
-# LOGGING OPTIONS
-# If you want messages logged to the syslog facility, as well as the
-# Nagios log file set this option to 1.  If not, set it to 0.
-
-use_syslog=1
-
-
-
-# NOTIFICATION LOGGING OPTION
-# If you don't want notifications to be logged, set this value to 0.
-# If notifications should be logged, set the value to 1.
-
-log_notifications=1
-
-
-
-# SERVICE RETRY LOGGING OPTION
-# If you don't want service check retries to be logged, set this value
-# to 0.  If retries should be logged, set the value to 1.
-
-log_service_retries=1
-
-
-
-# HOST RETRY LOGGING OPTION
-# If you don't want host check retries to be logged, set this value to
-# 0.  If retries should be logged, set the value to 1.
-
-log_host_retries=1
-
-
-
-# EVENT HANDLER LOGGING OPTION
-# If you don't want host and service event handlers to be logged, set
-# this value to 0.  If event handlers should be logged, set the value
-# to 1.
-
-log_event_handlers=1
-
-
-
-# INITIAL STATES LOGGING OPTION
-# If you want Nagios to log all initial host and service states to
-# the main log file (the first time the service or host is checked)
-# you can enable this option by setting this value to 1.  If you
-# are not using an external application that does long term state
-# statistics reporting, you do not need to enable this option.  In
-# this case, set the value to 0.
-
-log_initial_states=0
-
-
-
-# EXTERNAL COMMANDS LOGGING OPTION
-# If you don't want Nagios to log external commands, set this value
-# to 0.  If external commands should be logged, set this value to 1.
-# Note: This option does not include logging of passive service
-# checks - see the option below for controlling whether or not
-# passive checks are logged.
-
-log_external_commands=1
-
-
-
-# PASSIVE CHECKS LOGGING OPTION
-# If you don't want Nagios to log passive host and service checks, set
-# this value to 0.  If passive checks should be logged, set
-# this value to 1.
-
-log_passive_checks=1
-
-
-
-# GLOBAL HOST AND SERVICE EVENT HANDLERS
-# These options allow you to specify a host and service event handler
-# command that is to be run for every host or service state change.
-# The global event handler is executed immediately prior to the event
-# handler that you have optionally specified in each host or
-# service definition. The command argument is the short name of a
-# command definition that you define in your host configuration file.
-# Read the HTML docs for more information.
-
-#global_host_event_handler=somecommand
-#global_service_event_handler=somecommand
-
-
-
-# SERVICE INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" service checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all service checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!  This is not a
-# good thing for production, but is useful when testing the
-# parallelization functionality.
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-service_inter_check_delay_method=s
-
-
-
-# MAXIMUM SERVICE CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all services should
-# be completed.  Default is 30 minutes.
-
-max_service_check_spread=30
-
-
-
-# SERVICE CHECK INTERLEAVE FACTOR
-# This variable determines how service checks are interleaved.
-# Interleaving the service checks allows for a more even
-# distribution of service checks and reduced load on remote
-# hosts.  Setting this value to 1 is equivalent to how versions
-# of Nagios previous to 0.0.5 did service checks.  Set this
-# value to s (smart) for automatic calculation of the interleave
-# factor unless you have a specific reason to change it.
-#       s       = Use "smart" interleave factor calculation
-#       x       = Use an interleave factor of x, where x is a
-#                 number greater than or equal to 1.
-
-service_interleave_factor=s
-
-
-
-# HOST INTER-CHECK DELAY METHOD
-# This is the method that Nagios should use when initially
-# "spreading out" host checks when it starts monitoring.  The
-# default is to use smart delay calculation, which will try to
-# space all host checks out evenly to minimize CPU load.
-# Using the dumb setting will cause all checks to be scheduled
-# at the same time (with no delay between them)!
-#	n	= None - don't use any delay between checks
-#	d	= Use a "dumb" delay of 1 second between checks
-#	s	= Use "smart" inter-check delay calculation
-#       x.xx    = Use an inter-check delay of x.xx seconds
-
-host_inter_check_delay_method=s
-
-
-
-# MAXIMUM HOST CHECK SPREAD
-# This variable determines the timeframe (in minutes) from the
-# program start time that an initial check of all hosts should
-# be completed.  Default is 30 minutes.
-
-max_host_check_spread=30
-
-
-
-# MAXIMUM CONCURRENT SERVICE CHECKS
-# This option allows you to specify the maximum number of 
-# service checks that can be run in parallel at any given time.
-# Specifying a value of 1 for this variable essentially prevents
-# any service checks from being parallelized.  A value of 0
-# will not restrict the number of concurrent checks that are
-# being executed.
-
-max_concurrent_checks=0
-
-
-
-# HOST AND SERVICE CHECK REAPER FREQUENCY
-# This is the frequency (in seconds!) that Nagios will process
-# the results of host and service checks.
-
-check_result_reaper_frequency=10
-
-
-
-
-# MAX CHECK RESULT REAPER TIME
-# This is the max amount of time (in seconds) that  a single
-# check result reaper event will be allowed to run before 
-# returning control back to Nagios so it can perform other
-# duties.
-
-max_check_result_reaper_time=30
-
-
-
-
-# CHECK RESULT PATH
-# This is directory where Nagios stores the results of host and
-# service checks that have not yet been processed.
-#
-# Note: Make sure that only one instance of Nagios has access
-# to this directory!  
-
-check_result_path=/var/nagios/spool/checkresults
-
-
-
-
-# MAX CHECK RESULT FILE AGE
-# This option determines the maximum age (in seconds) for which check
-# result files are considered to be valid.  Files older than this 
-# threshold will be mercilessly deleted without further processing.
-
-max_check_result_file_age=3600
-
-
-
-
-# CACHED HOST CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous host check is considered current.
-# Cached host states (from host checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to the host check logic.
-# Too high of a value for this option may result in inaccurate host
-# states being used by Nagios, while a lower value may result in a
-# performance hit for host checks.  Use a value of 0 to disable host
-# check caching.
-
-cached_host_check_horizon=15
-
-
-
-# CACHED SERVICE CHECK HORIZON
-# This option determines the maximum amount of time (in seconds)
-# that the state of a previous service check is considered current.
-# Cached service states (from service checks that were performed more
-# recently than the timeframe specified by this value) can immensely
-# improve performance in regards to predictive dependency checks.
-# Use a value of 0 to disable service check caching.
-
-cached_service_check_horizon=15
-
-
-
-# ENABLE PREDICTIVE HOST DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of hosts when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# host dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_host_dependency_checks=1
-
-
-
-# ENABLE PREDICTIVE SERVICE DEPENDENCY CHECKS
-# This option determines whether or not Nagios will attempt to execute
-# checks of services when it predicts that future dependency logic tests
-# may be needed.  These predictive checks can help ensure that your
-# service dependency logic works well.
-# Values:
-#  0 = Disable predictive checks
-#  1 = Enable predictive checks (default)
-
-enable_predictive_service_dependency_checks=1
-
-
-
-# SOFT STATE DEPENDENCIES
-# This option determines whether or not Nagios will use soft state 
-# information when checking host and service dependencies. Normally 
-# Nagios will only use the latest hard host or service state when 
-# checking dependencies. If you want it to use the latest state (regardless
-# of whether it's a soft or hard state type), enable this option. 
-# Values:
-#  0 = Don't use soft state dependencies (default) 
-#  1 = Use soft state dependencies 
-
-soft_state_dependencies=0
-
-
-
-# TIME CHANGE ADJUSTMENT THRESHOLDS
-# These options determine when Nagios will react to detected changes
-# in system time (either forward or backwards).
-
-#time_change_threshold=900
-
-
-
-# AUTO-RESCHEDULING OPTION
-# This option determines whether or not Nagios will attempt to
-# automatically reschedule active host and service checks to
-# "smooth" them out over time.  This can help balance the load on
-# the monitoring server.  
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_reschedule_checks=0
-
-
-
-# AUTO-RESCHEDULING INTERVAL
-# This option determines how often (in seconds) Nagios will
-# attempt to automatically reschedule checks.  This option only
-# has an effect if the auto_reschedule_checks option is enabled.
-# Default is 30 seconds.
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_interval=30
-
-
-
-# AUTO-RESCHEDULING WINDOW
-# This option determines the "window" of time (in seconds) that
-# Nagios will look at when automatically rescheduling checks.
-# Only host and service checks that occur in the next X seconds
-# (determined by this variable) will be rescheduled. This option
-# only has an effect if the auto_reschedule_checks option is
-# enabled.  Default is 180 seconds (3 minutes).
-# WARNING: THIS IS AN EXPERIMENTAL FEATURE - IT CAN DEGRADE
-# PERFORMANCE, RATHER THAN INCREASE IT, IF USED IMPROPERLY
-
-auto_rescheduling_window=180
-
-
-
-# SLEEP TIME
-# This is the number of seconds to sleep between checking for system
-# events and service checks that need to be run.
-
-sleep_time=0.25
-
-
-
-# TIMEOUT VALUES
-# These options control how much time Nagios will allow various
-# types of commands to execute before killing them off.  Options
-# are available for controlling maximum time allotted for
-# service checks, host checks, event handlers, notifications, the
-# ocsp command, and performance data commands.  All values are in
-# seconds.
-
-service_check_timeout=60
-host_check_timeout=30
-event_handler_timeout=30
-notification_timeout=30
-ocsp_timeout=5
-perfdata_timeout=5
-
-
-
-# RETAIN STATE INFORMATION
-# This setting determines whether or not Nagios will save state
-# information for services and hosts before it shuts down.  Upon
-# startup Nagios will reload all saved service and host state
-# information before starting to monitor.  This is useful for 
-# maintaining long-term data on state statistics, etc, but will
-# slow Nagios down a bit when it (re)starts.  Since it's only
-# a one-time penalty, I think it's well worth the additional
-# startup delay.
-
-retain_state_information=1
-
-
-
-# STATE RETENTION FILE
-# This is the file that Nagios should use to store host and
-# service state information before it shuts down.  The state 
-# information in this file is also read immediately prior to
-# starting to monitor the network when Nagios is restarted.
-# This file is used only if the retain_state_information
-# variable is set to 1.
-
-state_retention_file=/var/nagios/retention.dat
-
-
-
-# RETENTION DATA UPDATE INTERVAL
-# This setting determines how often (in minutes) that Nagios
-# will automatically save retention data during normal operation.
-# If you set this value to 0, Nagios will not save retention
-# data at regular interval, but it will still save retention
-# data before shutting down or restarting.  If you have disabled
-# state retention, this option has no effect.
-
-retention_update_interval=60
-
-
-
-# USE RETAINED PROGRAM STATE
-# This setting determines whether or not Nagios will set 
-# program status variables based on the values saved in the
-# retention file.  If you want to use retained program status
-# information, set this value to 1.  If not, set this value
-# to 0.
-
-use_retained_program_state=1
-
-
-
-# USE RETAINED SCHEDULING INFO
-# This setting determines whether or not Nagios will retain
-# the scheduling info (next check time) for hosts and services
-# based on the values saved in the retention file.  If you
-# want to use retained scheduling info, set this
-# value to 1.  If not, set this value to 0.
-
-use_retained_scheduling_info=1
-
-
-
-# RETAINED ATTRIBUTE MASKS (ADVANCED FEATURE)
-# The following variables are used to specify specific host and
-# service attributes that should *not* be retained by Nagios during
-# program restarts.
-#
-# The values of the masks are bitwise ANDs of values specified
-# by the "MODATTR_" definitions found in include/common.h.  
-# For example, if you do not want the current enabled/disabled state
-# of flap detection and event handlers for hosts to be retained, you
-# would use a value of 24 for the host attribute mask...
-# MODATTR_EVENT_HANDLER_ENABLED (8) + MODATTR_FLAP_DETECTION_ENABLED (16) = 24
-
-# This mask determines what host attributes are not retained
-retained_host_attribute_mask=0
-
-# This mask determines what service attributes are not retained
-retained_service_attribute_mask=0
-
-# These two masks determine what process attributes are not retained.
-# There are two masks, because some process attributes have host and service
-# options.  For example, you can disable active host checks, but leave active
-# service checks enabled.
-retained_process_host_attribute_mask=0
-retained_process_service_attribute_mask=0
-
-# These two masks determine what contact attributes are not retained.
-# There are two masks, because some contact attributes have host and
-# service options.  For example, you can disable host notifications for
-# a contact, but leave service notifications enabled for them.
-retained_contact_host_attribute_mask=0
-retained_contact_service_attribute_mask=0
-
-
-
-# INTERVAL LENGTH
-# This is the seconds per unit interval as used in the
-# host/contact/service configuration files.  Setting this to 60 means
-# that each interval is one minute long (60 seconds).  Other settings
-# have not been tested much, so your mileage is likely to vary...
-
-interval_length=60
-
-
-
-# CHECK FOR UPDATES
-# This option determines whether Nagios will automatically check to
-# see if new updates (releases) are available.  It is recommended that you
-# enable this option to ensure that you stay on top of the latest critical
-# patches to Nagios.  Nagios is critical to you - make sure you keep it in
-# good shape.  Nagios will check once a day for new updates. Data collected
-# by Nagios Enterprises from the update check is processed in accordance 
-# with our privacy policy - see http://api.nagios.org for details.
-
-check_for_updates=1
-
-
-
-# BARE UPDATE CHECK
-# This option determines what data Nagios will send to api.nagios.org when
-# it checks for updates.  By default, Nagios will send information on the 
-# current version of Nagios you have installed, as well as an indicator as
-# to whether this was a new installation or not.  Nagios Enterprises uses
-# this data to determine the number of users running specific versions of 
-# Nagios.  Enable this option if you do not want this information to be sent.
-
-bare_update_check=0
-
-
-
-# AGGRESSIVE HOST CHECKING OPTION
-# If you don't want to turn on aggressive host checking features, set
-# this value to 0 (the default).  Otherwise set this value to 1 to
-# enable the aggressive check option.  Read the docs for more info
-# on what aggressive host check is or check out the source code in
-# base/checks.c
-
-use_aggressive_host_checking=0
-
-
-
-# SERVICE CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# service checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of service checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_service_checks=1
-
-
-
-# PASSIVE SERVICE CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# service check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_service_checks=1
-
-
-
-# HOST CHECK EXECUTION OPTION
-# This determines whether or not Nagios will actively execute
-# host checks when it initially starts.  If this option is 
-# disabled, checks are not actively made, but Nagios can still
-# receive and process passive check results that come in.  Unless
-# you're implementing redundant hosts or have a special need for
-# disabling the execution of host checks, leave this enabled!
-# Values: 1 = enable checks, 0 = disable checks
-
-execute_host_checks=1
-
-
-
-# PASSIVE HOST CHECK ACCEPTANCE OPTION
-# This determines whether or not Nagios will accept passive
-# host check results when it initially (re)starts.
-# Values: 1 = accept passive checks, 0 = reject passive checks
-
-accept_passive_host_checks=1
-
-
-
-# NOTIFICATIONS OPTION
-# This determines whether or not Nagios will send out any host or
-# service notifications when it is initially (re)started.
-# Values: 1 = enable notifications, 0 = disable notifications
-
-enable_notifications=1
-
-
-
-# EVENT HANDLER USE OPTION
-# This determines whether or not Nagios will run any host or
-# service event handlers when it is initially (re)started.  Unless
-# you're implementing redundant hosts, leave this option enabled.
-# Values: 1 = enable event handlers, 0 = disable event handlers
-
-enable_event_handlers=1
-
-
-
-# PROCESS PERFORMANCE DATA OPTION
-# This determines whether or not Nagios will process performance
-# data returned from service and host checks.  If this option is
-# enabled, host performance data will be processed using the
-# host_perfdata_command (defined below) and service performance
-# data will be processed using the service_perfdata_command (also
-# defined below).  Read the HTML docs for more information on
-# performance data.
-# Values: 1 = process performance data, 0 = do not process performance data
-
-process_performance_data=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA PROCESSING COMMANDS
-# These commands are run after every host and service check is
-# performed.  These commands are executed only if the
-# enable_performance_data option (above) is set to 1.  The command
-# argument is the short name of a command definition that you 
-# define in your host configuration file.  Read the HTML docs for
-# more information on performance data.
-
-#host_perfdata_command=process-host-perfdata
-#service_perfdata_command=process-service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILES
-# These files are used to store host and service performance data.
-# Performance data is only written to these files if the
-# enable_performance_data option (above) is set to 1.
-
-#host_perfdata_file=/tmp/host-perfdata
-#service_perfdata_file=/tmp/service-perfdata
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE TEMPLATES
-# These options determine what data is written (and how) to the
-# performance data files.  The templates may contain macros, special
-# characters (\t for tab, \r for carriage return, \n for newline)
-# and plain text.  A newline is automatically added after each write
-# to the performance data file.  Some examples of what you can do are
-# shown below.
-
-#host_perfdata_file_template=[HOSTPERFDATA]\t$TIMET$\t$HOSTNAME$\t$HOSTEXECUTIONTIME$\t$HOSTOUTPUT$\t$HOSTPERFDATA$
-#service_perfdata_file_template=[SERVICEPERFDATA]\t$TIMET$\t$HOSTNAME$\t$SERVICEDESC$\t$SERVICEEXECUTIONTIME$\t$SERVICELATENCY$\t$SERVICEOUTPUT$\t$SERVICEPERFDATA$
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE MODES
-# This option determines whether or not the host and service
-# performance data files are opened in write ("w") or append ("a")
-# mode. If you want to use named pipes, you should use the special
-# pipe ("p") mode which avoid blocking at startup, otherwise you will
-# likely want the default append ("a") mode.
-
-#host_perfdata_file_mode=a
-#service_perfdata_file_mode=a
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING INTERVAL
-# These options determine how often (in seconds) the host and service
-# performance data files are processed using the commands defined
-# below.  A value of 0 indicates the files should not be periodically
-# processed.
-
-#host_perfdata_file_processing_interval=0
-#service_perfdata_file_processing_interval=0
-
-
-
-# HOST AND SERVICE PERFORMANCE DATA FILE PROCESSING COMMANDS
-# These commands are used to periodically process the host and
-# service performance data files.  The interval at which the
-# processing occurs is determined by the options above.
-
-#host_perfdata_file_processing_command=process-host-perfdata-file
-#service_perfdata_file_processing_command=process-service-perfdata-file
-
-
-
-# OBSESS OVER SERVICE CHECKS OPTION
-# This determines whether or not Nagios will obsess over service
-# checks and run the ocsp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over services, 0 = do not obsess (default)
-
-obsess_over_services=0
-
-
-
-# OBSESSIVE COMPULSIVE SERVICE PROCESSOR COMMAND
-# This is the command that is run for every service check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_services option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ocsp_command=somecommand
-
-
-
-# OBSESS OVER HOST CHECKS OPTION
-# This determines whether or not Nagios will obsess over host
-# checks and run the ochp_command defined below.  Unless you're
-# planning on implementing distributed monitoring, do not enable
-# this option.  Read the HTML docs for more information on
-# implementing distributed monitoring.
-# Values: 1 = obsess over hosts, 0 = do not obsess (default)
-
-obsess_over_hosts=0
-
-
-
-# OBSESSIVE COMPULSIVE HOST PROCESSOR COMMAND
-# This is the command that is run for every host check that is
-# processed by Nagios.  This command is executed only if the
-# obsess_over_hosts option (above) is set to 1.  The command 
-# argument is the short name of a command definition that you
-# define in your host configuration file. Read the HTML docs for
-# more information on implementing distributed monitoring.
-
-#ochp_command=somecommand
-
-
-
-# TRANSLATE PASSIVE HOST CHECKS OPTION
-# This determines whether or not Nagios will translate
-# DOWN/UNREACHABLE passive host check results into their proper
-# state for this instance of Nagios.  This option is useful
-# if you have distributed or failover monitoring setup.  In
-# these cases your other Nagios servers probably have a different
-# "view" of the network, with regards to the parent/child relationship
-# of hosts.  If a distributed monitoring server thinks a host
-# is DOWN, it may actually be UNREACHABLE from the point of
-# this Nagios instance.  Enabling this option will tell Nagios
-# to translate any DOWN or UNREACHABLE host states it receives
-# passively into the correct state from the view of this server.
-# Values: 1 = perform translation, 0 = do not translate (default)
-
-translate_passive_host_checks=0
-
-
-
-# PASSIVE HOST CHECKS ARE SOFT OPTION
-# This determines whether or not Nagios will treat passive host
-# checks as being HARD or SOFT.  By default, a passive host check
-# result will put a host into a HARD state type.  This can be changed
-# by enabling this option.
-# Values: 0 = passive checks are HARD, 1 = passive checks are SOFT
-
-passive_host_checks_are_soft=0
-
-
-
-# ORPHANED HOST/SERVICE CHECK OPTIONS
-# These options determine whether or not Nagios will periodically 
-# check for orphaned host and service checks.  Since service checks are
-# not rescheduled until the results of their previous execution 
-# instance are processed, there exists a possibility that some
-# checks may never get rescheduled.  A similar situation exists for
-# host checks, although the exact scheduling details differ a bit
-# from service checks.  Orphaned checks seem to be a rare
-# problem and should not happen under normal circumstances.
-# If you have problems with service checks never getting
-# rescheduled, make sure you have orphaned service checks enabled.
-# Values: 1 = enable checks, 0 = disable checks
-
-check_for_orphaned_services=1
-check_for_orphaned_hosts=1
-
-
-
-# SERVICE FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of service results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_service_freshness=1
-
-
-
-# SERVICE FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of service check results.  If you have
-# disabled service freshness checking, this option has no effect.
-
-service_freshness_check_interval=60
-
-
-
-# HOST FRESHNESS CHECK OPTION
-# This option determines whether or not Nagios will periodically
-# check the "freshness" of host results.  Enabling this option
-# is useful for ensuring passive checks are received in a timely
-# manner.
-# Values: 1 = enable freshness checking, 0 = disable freshness checking
-
-check_host_freshness=0
-
-
-
-# HOST FRESHNESS CHECK INTERVAL
-# This setting determines how often (in seconds) Nagios will
-# check the "freshness" of host check results.  If you have
-# disabled host freshness checking, this option has no effect.
-
-host_freshness_check_interval=60
-
-
-
-
-# ADDITIONAL FRESHNESS THRESHOLD LATENCY
-# This setting determines the number of seconds that Nagios
-# will add to any host and service freshness thresholds that
-# it calculates (those not explicitly specified by the user).
-
-additional_freshness_latency=15
-
-
-
-
-# FLAP DETECTION OPTION
-# This option determines whether or not Nagios will try
-# and detect hosts and services that are "flapping".  
-# Flapping occurs when a host or service changes between
-# states too frequently.  When Nagios detects that a 
-# host or service is flapping, it will temporarily suppress
-# notifications for that host/service until it stops
-# flapping.  Flap detection is very experimental, so read
-# the HTML documentation before enabling this feature!
-# Values: 1 = enable flap detection
-#         0 = disable flap detection (default)
-
-enable_flap_detection=1
-
-
-
-# FLAP DETECTION THRESHOLDS FOR HOSTS AND SERVICES
-# Read the HTML documentation on flap detection for
-# an explanation of what this option does.  This option
-# has no effect if flap detection is disabled.
-
-low_service_flap_threshold=5.0
-high_service_flap_threshold=20.0
-low_host_flap_threshold=5.0
-high_host_flap_threshold=20.0
-
-
-
-# DATE FORMAT OPTION
-# This option determines how short dates are displayed. Valid options
-# include:
-#	us		(MM-DD-YYYY HH:MM:SS)
-#	euro    	(DD-MM-YYYY HH:MM:SS)
-#	iso8601		(YYYY-MM-DD HH:MM:SS)
-#	strict-iso8601	(YYYY-MM-DDTHH:MM:SS)
-#
-
-date_format=us
-
-
-
-
-# TIMEZONE OFFSET
-# This option is used to override the default timezone that this
-# instance of Nagios runs in.  If not specified, Nagios will use
-# the system configured timezone.
-#
-# NOTE: In order to display the correct timezone in the CGIs, you
-# will also need to alter the Apache directives for the CGI path 
-# to include your timezone.  Example:
-#
-#   <Directory "/usr/local/nagios/sbin/">
-#      SetEnv TZ "Australia/Brisbane"
-#      ...
-#   </Directory>
-
-#use_timezone=US/Mountain
-#use_timezone=Australia/Brisbane
-
-
-
-
-# P1.PL FILE LOCATION
-# This value determines where the p1.pl perl script (used by the
-# embedded Perl interpreter) is located.  If you didn't compile
-# Nagios with embedded Perl support, this option has no effect.
-
-p1_file = {{nagios_p1_pl}}
-
-
-
-# EMBEDDED PERL INTERPRETER OPTION
-# This option determines whether or not the embedded Perl interpreter
-# will be enabled during runtime.  This option has no effect if Nagios
-# has not been compiled with support for embedded Perl.
-# Values: 0 = disable interpreter, 1 = enable interpreter
-
-enable_embedded_perl=1
-
-
-
-# EMBEDDED PERL USAGE OPTION
-# This option determines whether or not Nagios will process Perl plugins
-# and scripts with the embedded Perl interpreter if the plugins/scripts
-# do not explicitly indicate whether or not it is okay to do so. Read
-# the HTML documentation on the embedded Perl interpreter for more 
-# information on how this option works.
-
-use_embedded_perl_implicitly=1
-
-
-
-# ILLEGAL OBJECT NAME CHARACTERS
-# This option allows you to specify illegal characters that cannot
-# be used in host names, service descriptions, or names of other
-# object types.
-
-illegal_object_name_chars=`~!$%^&*|'"<>?,()=
-
-
-
-# ILLEGAL MACRO OUTPUT CHARACTERS
-# This option allows you to specify illegal characters that are
-# stripped from macros before being used in notifications, event
-# handlers, etc.  This DOES NOT affect macros used in service or
-# host check commands.
-# The following macros are stripped of the characters you specify:
-#	$HOSTOUTPUT$
-#	$HOSTPERFDATA$
-#	$HOSTACKAUTHOR$
-#	$HOSTACKCOMMENT$
-#	$SERVICEOUTPUT$
-#	$SERVICEPERFDATA$
-#	$SERVICEACKAUTHOR$
-#	$SERVICEACKCOMMENT$
-
-illegal_macro_output_chars=`~$&|'"<>
-
-
-
-# REGULAR EXPRESSION MATCHING
-# This option controls whether or not regular expression matching
-# takes place in the object config files.  Regular expression
-# matching is used to match host, hostgroup, service, and service
-# group names/descriptions in some fields of various object types.
-# Values: 1 = enable regexp matching, 0 = disable regexp matching
-
-use_regexp_matching=0
-
-
-
-# "TRUE" REGULAR EXPRESSION MATCHING
-# This option controls whether or not "true" regular expression 
-# matching takes place in the object config files.  This option
-# only has an effect if regular expression matching is enabled
-# (see above).  If this option is DISABLED, regular expression
-# matching only occurs if a string contains wildcard characters
-# (* and ?).  If the option is ENABLED, regexp matching occurs
-# all the time (which can be annoying).
-# Values: 1 = enable true matching, 0 = disable true matching
-
-use_true_regexp_matching=0
-
-
-
-# ADMINISTRATOR EMAIL/PAGER ADDRESSES
-# The email and pager address of a global administrator (likely you).
-# Nagios never uses these values itself, but you can access them by
-# using the $ADMINEMAIL$ and $ADMINPAGER$ macros in your notification
-# commands.
-
-admin_email=nagios@localhost
-admin_pager=pagenagios@localhost
-
-
-
-# DAEMON CORE DUMP OPTION
-# This option determines whether or not Nagios is allowed to create
-# a core dump when it runs as a daemon.  Note that it is generally
-# considered bad form to allow this, but it may be useful for
-# debugging purposes.  Enabling this option doesn't guarantee that
-# a core file will be produced, but that's just life...
-# Values: 1 - Allow core dumps
-#         0 - Do not allow core dumps (default)
-
-daemon_dumps_core=0
-
-
-
-# LARGE INSTALLATION TWEAKS OPTION
-# This option determines whether or not Nagios will take some shortcuts
-# which can save on memory and CPU usage in large Nagios installations.
-# Read the documentation for more information on the benefits/tradeoffs
-# of enabling this option.
-# Values: 1 - Enabled tweaks
-#         0 - Disable tweaks (default)
-
-use_large_installation_tweaks=0
-
-
-
-# ENABLE ENVIRONMENT MACROS
-# This option determines whether or not Nagios will make all standard
-# macros available as environment variables when host/service checks
-# and system commands (event handlers, notifications, etc.) are
-# executed.  Enabling this option can cause performance issues in 
-# large installations, as it will consume a bit more memory and (more
-# importantly) consume more CPU.
-# Values: 1 - Enable environment variable macros (default)
-#         0 - Disable environment variable macros
-
-enable_environment_macros=1
-
-
-
-# CHILD PROCESS MEMORY OPTION
-# This option determines whether or not Nagios will free memory in
-# child processes (processed used to execute system commands and host/
-# service checks).  If you specify a value here, it will override
-# program defaults.
-# Value: 1 - Free memory in child processes
-#        0 - Do not free memory in child processes
-
-#free_child_process_memory=1
-
-
-
-# CHILD PROCESS FORKING BEHAVIOR
-# This option determines how Nagios will fork child processes
-# (used to execute system commands and host/service checks).  Normally
-# child processes are fork()ed twice, which provides a very high level
-# of isolation from problems.  Fork()ing once is probably enough and will
-# save a great deal on CPU usage (in large installs), so you might
-# want to consider using this.  If you specify a value here, it will
-# override program defaults.
-# Value: 1 - Child processes fork() twice
-#        0 - Child processes fork() just once
-
-#child_processes_fork_twice=1
-
-
-
-# DEBUG LEVEL
-# This option determines how much (if any) debugging information will
-# be written to the debug file.  OR values together to log multiple
-# types of information.
-# Values: 
-#          -1 = Everything
-#          0 = Nothing
-#	   1 = Functions
-#          2 = Configuration
-#          4 = Process information
-#	   8 = Scheduled events
-#          16 = Host/service checks
-#          32 = Notifications
-#          64 = Event broker
-#          128 = External commands
-#          256 = Commands
-#          512 = Scheduled downtime
-#          1024 = Comments
-#          2048 = Macros
-
-debug_level=0
-
-
-
-# DEBUG VERBOSITY
-# This option determines how verbose the debug log output will be.
-# Values: 0 = Brief output
-#         1 = More detailed
-#         2 = Very detailed
-
-debug_verbosity=1
-
-
-
-# DEBUG FILE
-# This option determines where Nagios should write debugging information.
-
-debug_file=/var/log/nagios/nagios.debug
-
-
-
-# MAX DEBUG FILE SIZE
-# This option determines the maximum size (in bytes) of the debug file.  If
-# the file grows larger than this size, it will be renamed with a .old
-# extension.  If a file already exists with a .old extension it will
-# automatically be deleted.  This helps ensure your disk space usage doesn't
-# get out of control when debugging Nagios.
-
-max_debug_file_size=1000000
-
-
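
The nagios.cfg.j2 file above is a Jinja2 template; Ambari fills placeholders such as {{nagios_p1_pl}} when it writes the rendered nagios.cfg on the monitored host. A minimal Python sketch of that substitution, with an assumed value for the p1.pl path (the real value comes from the stack's params):

    from jinja2 import Template

    # Assumed value; the actual path is supplied by the stack's params module.
    nagios_p1_pl = "/usr/lib64/nagios/p1.pl"

    snippet = "p1_file = {{nagios_p1_pl}}\n"
    print(Template(snippet).render(nagios_p1_pl=nagios_p1_pl))
    # -> p1_file = /usr/lib64/nagios/p1.pl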

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.conf.j2
deleted file mode 100644
index d8936a0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.conf.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-#
-# SAMPLE CONFIG SNIPPETS FOR APACHE WEB SERVER
-# Last Modified: 11-26-2005
-#
-# This file contains examples of entries that need
-# to be incorporated into your Apache web server
-# configuration file.  Customize the paths, etc. as
-# needed to fit your system.
-#
-
-ScriptAlias /nagios/cgi-bin "/usr/lib/nagios/cgi"
-
-<Directory "/usr/lib/nagios/cgi">
-#  SSLRequireSSL
-   Options ExecCGI
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
-Alias /nagios "/usr/share/nagios"
-
-<Directory "/usr/share/nagios">
-#  SSLRequireSSL
-   Options None
-   AllowOverride None
-   Order allow,deny
-   Allow from all
-#  Order deny,allow
-#  Deny from all
-#  Allow from 127.0.0.1
-   AuthName "Nagios Access"
-   AuthType Basic
-   AuthUserFile /etc/nagios/htpasswd.users
-   Require valid-user
-</Directory>
-
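
The Apache snippet above protects both the CGI and static aliases with Basic auth against /etc/nagios/htpasswd.users. A small Python sketch, with an assumed host and account, for checking that the /nagios alias answers once the web server configuration is in place:

    import urllib.request

    url = "http://localhost/nagios/"   # assumed Nagios web host
    passwords = urllib.request.HTTPPasswordMgrWithDefaultRealm()
    passwords.add_password(None, url, "nagiosadmin", "secret")  # assumed account
    opener = urllib.request.build_opener(
        urllib.request.HTTPBasicAuthHandler(passwords))
    with opener.open(url, timeout=10) as response:
        print(response.status)   # 200 once Apache and Nagios serve the UI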

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.j2
deleted file mode 100644
index 01e21ac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/nagios.j2
+++ /dev/null
@@ -1,146 +0,0 @@
-#!/bin/sh
-# $Id$
-# Nagios	Startup script for the Nagios monitoring daemon
-#
-# chkconfig:	- 85 15
-# description:	Nagios is a service monitoring system
-# processname: nagios
-# config: /etc/nagios/nagios.cfg
-# pidfile: /var/nagios/nagios.pid
-#
-### BEGIN INIT INFO
-# Provides:		nagios
-# Required-Start:	$local_fs $syslog $network
-# Required-Stop:	$local_fs $syslog $network
-# Short-Description:    start and stop Nagios monitoring server
-# Description:		Nagios is a service monitoring system
-### END INIT INFO
-
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# Source function library.
-. /etc/rc.d/init.d/functions
-
-prefix="/usr"
-exec_prefix="/usr"
-exec="/usr/sbin/nagios"
-prog="nagios"
-config="/etc/nagios/nagios.cfg"
-pidfile="{{nagios_pid_file}}"
-user="{{nagios_user}}"
-
-[ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
-
-lockfile=/var/lock/subsys/$prog
-
-start() {
-    [ -x $exec ] || exit 5
-    [ -f $config ] || exit 6
-    echo -n $"Starting $prog: "
-    daemon --user=$user $exec -d $config
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && touch $lockfile
-    return $retval
-}
-
-stop() {
-    echo -n $"Stopping $prog: "
-    killproc -d 10 $exec
-    retval=$?
-    echo
-    [ $retval -eq 0 ] && rm -f $lockfile
-    return $retval
-}
-
-
-restart() {
-    stop
-    start
-}
-
-reload() {
-    echo -n $"Reloading $prog: "
-    killproc $exec -HUP
-    RETVAL=$?
-    echo
-}
-
-force_reload() {
-    restart
-}
-
-check_config() {
-        $nice runuser -s /bin/bash - $user -c "$corelimit >/dev/null 2>&1 ; $exec -v $config > /dev/null 2>&1"
-        RETVAL=$?
-        if [ $RETVAL -ne 0 ] ; then
-                echo -n $"Configuration validation failed"
-                failure
-                echo
-                exit 1
-
-        fi
-}
-
-
-case "$1" in
-    start)
-        status $prog && exit 0
-	check_config
-        $1
-        ;;
-    stop)
-        status $prog|| exit 0
-        $1
-        ;;
-    restart)
-	check_config
-        $1
-        ;;
-    reload)
-        status $prog || exit 7
-	check_config
-        $1
-        ;;
-    force-reload)
-	check_config
-        force_reload
-        ;;
-    status)
-        status $prog
-        ;;
-    condrestart|try-restart)
-        status $prog|| exit 0
-	check_config
-        restart
-        ;;
-    configtest)
-        echo -n  $"Checking config for $prog: "
-        check_config && success
-        echo
-	;;
-    *)
-        echo $"Usage: $0 {start|stop|status|restart|condrestart|try-restart|reload|force-reload|configtest}"
-        exit 2
-esac
-exit $?
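
check_config() in the init script above validates the configuration by running the nagios binary with -v before any start, restart, or reload. A hedged Python sketch of the same pre-flight check (paths mirror the script; adjust for your layout):

    import subprocess
    import sys

    result = subprocess.run(
        ["/usr/sbin/nagios", "-v", "/etc/nagios/nagios.cfg"],
        capture_output=True, text=True)
    if result.returncode != 0:
        sys.stderr.write("Configuration validation failed\n" + result.stdout)
        sys.exit(1)
    print("Configuration OK")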

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/resource.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/resource.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/resource.cfg.j2
deleted file mode 100644
index 23c7a56..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/NAGIOS/package/templates/resource.cfg.j2
+++ /dev/null
@@ -1,33 +0,0 @@
-###########################################################################
-#
-# RESOURCE.CFG - Sample Resource File for Nagios 3.2.3
-#
-# Last Modified: 09-10-2003
-#
-# You can define $USERx$ macros in this file, which can in turn be used
-# in command definitions in your host config file(s).  $USERx$ macros are
-# useful for storing sensitive information such as usernames, passwords,
-# etc.  They are also handy for specifying the path to plugins and
-# event handlers - if you decide to move the plugins or event handlers to
-# a different directory in the future, you can just update one or two
-# $USERx$ macros, instead of modifying a lot of command definitions.
-#
-# The CGIs will not attempt to read the contents of resource files, so
-# you can set restrictive permissions (600 or 660) on them.
-#
-# Nagios supports up to 32 $USERx$ macros ($USER1$ through $USER32$)
-#
-# Resource files may also be used to store configuration directives for
-# external data sources like MySQL...
-#
-###########################################################################
-
-# Sets $USER1$ to be the path to the plugins
-$USER1$={{plugins_dir}}
-
-# Sets $USER2$ to be the path to event handlers
-#$USER2$={{eventhandlers_dir}}
-
-# Store some usernames and passwords (hidden from the CGIs)
-#$USER3$=someuser
-#$USER4$=somepassword
\ No newline at end of file
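
The $USERx$ macros defined above are substituted into command definitions elsewhere in the Nagios object configuration. A small illustrative parser (not Nagios code) that reads resource.cfg and expands $USER1$ in a typical check command line:

    macros = {}
    with open("/etc/nagios/resource.cfg") as fh:
        for line in fh:
            line = line.strip()
            # Keep only uncommented $USERx$=value assignments.
            if line.startswith("$USER") and "=" in line:
                name, value = line.split("=", 1)
                macros[name] = value

    # A typical check command; $USER1$ is the plugins directory set above.
    command = "$USER1$/check_ping -H $HOSTADDRESS$ -w 3000,80% -c 5000,100%"
    for name, value in macros.items():
        command = command.replace(name, value)
    print(command)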

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/configuration/oozie-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/configuration/oozie-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/configuration/oozie-site.xml
deleted file mode 100644
index bf4533f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/configuration/oozie-site.xml
+++ /dev/null
@@ -1,313 +0,0 @@
-<?xml version="1.0"?>
-<!--
-  Licensed to the Apache Software Foundation (ASF) under one
-  or more contributor license agreements.  See the NOTICE file
-  distributed with this work for additional information
-  regarding copyright ownership.  The ASF licenses this file
-  to you under the Apache License, Version 2.0 (the
-  "License"); you may not use this file except in compliance
-  with the License.  You may obtain a copy of the License at
-        
-       http://www.apache.org/licenses/LICENSE-2.0
-  
-  Unless required by applicable law or agreed to in writing, software
-  distributed under the License is distributed on an "AS IS" BASIS,
-  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-  See the License for the specific language governing permissions and
-  limitations under the License.
--->     
-
-<configuration>
-
-<!--
-    Refer to the oozie-default.xml file for the complete list of
-    Oozie configuration properties and their default values.
--->
-  <property>
-    <name>oozie.base.url</name>
-    <value>http://localhost:11000/oozie</value>
-    <description>Base Oozie URL.</description>
-   </property>
-
-  <property>
-    <name>oozie.system.id</name>
-    <value>oozie-${user.name}</value>
-    <description>
-    The Oozie system ID.
-    </description>
-   </property>
-
-   <property>
-     <name>oozie.systemmode</name>
-     <value>NORMAL</value>
-     <description>
-     System mode for Oozie at startup.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.AuthorizationService.security.enabled</name>
-     <value>true</value>
-     <description>
-     Specifies whether security (user name/admin role) is enabled or not.
-     If disabled any user can manage Oozie system and manage any job.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.older.than</name>
-     <value>30</value>
-     <description>
-     Jobs older than this value, in days, will be purged by the PurgeService.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.PurgeService.purge.interval</name>
-     <value>3600</value>
-     <description>
-     Interval at which the purge service will run, in seconds.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.queue.size</name>
-     <value>1000</value>
-     <description>Max callable queue size</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.threads</name>
-     <value>10</value>
-     <description>Number of threads used for executing callables</description>
-   </property>
-
-   <property>
-     <name>oozie.service.CallableQueueService.callable.concurrency</name>
-     <value>3</value>
-     <description>
-     Maximum concurrency for a given callable type.
-     Each command is a callable type (submit, start, run, signal, job, jobs, suspend, resume, etc).
-     Each action type is a callable type (Map-Reduce, Pig, SSH, FS, sub-workflow, etc).
-     All commands that use action executors (action-start, action-end, action-kill and action-check) use
-     the action type as the callable type.
-     </description>
-   </property>
-
-   <property>
-     <name>oozie.service.coord.normal.default.timeout</name>
-     <value>120</value>
-     <description>Default timeout for a coordinator action input check (in minutes) for normal job.
-      -1 means infinite timeout</description>
-   </property>
-
-   <property>
-     <name>oozie.db.schema.name</name>
-     <value>oozie</value>
-     <description>
-      Oozie DataBase Name
-     </description>
-   </property>
-
-    <property>
-      <name>oozie.service.HadoopAccessorService.jobTracker.whitelist</name>
-      <value> </value>
-      <description>
-      Whitelisted job tracker for Oozie service.
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.authentication.type</name>
-      <value>simple</value>
-      <description>
-      </description>
-    </property>
-   
-    <property>
-      <name>oozie.service.HadoopAccessorService.nameNode.whitelist</name>
-      <value> </value>
-      <description>
-      </description>
-    </property>
-
-    <property>
-      <name>oozie.service.WorkflowAppService.system.libpath</name>
-      <value>/user/${user.name}/share/lib</value>
-      <description>
-      System library path to use for workflow applications.
-      This path is added to workflow application if their job properties sets
-      the property 'oozie.use.system.libpath' to true.
-      </description>
-    </property>
-
-    <property>
-      <name>use.system.libpath.for.mapreduce.and.pig.jobs</name>
-      <value>false</value>
-      <description>
-      If set to true, submissions of MapReduce and Pig jobs will include
-      automatically the system library path, thus not requiring users to
-      specify where the Pig JAR files are. Instead, the ones from the system
-      library path are used.
-      </description>
-    </property>
-    <property>
-      <name>oozie.authentication.kerberos.name.rules</name>
-      <value>
-        RULE:[2:$1@$0]([jt]t@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-MAPREDUSER/
-        RULE:[2:$1@$0]([nd]n@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HDFSUSER/
-        RULE:[2:$1@$0](hm@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        RULE:[2:$1@$0](rs@.*TODO-KERBEROS-DOMAIN)s/.*/TODO-HBASE-USER/
-        DEFAULT
-        </value>
-      <description>The mapping from kerberos principal names to local OS user names.</description>
-    </property>
-    <property>
-      <name>oozie.service.HadoopAccessorService.hadoop.configurations</name>
-      <value>*=/etc/hadoop/conf</value>
-      <description>
-          Comma separated AUTHORITY=HADOOP_CONF_DIR, where AUTHORITY is the HOST:PORT of
-          the Hadoop service (JobTracker, HDFS). The wildcard '*' configuration is
-          used when there is no exact match for an authority. The HADOOP_CONF_DIR contains
-          the relevant Hadoop *-site.xml files. If the path is relative, it is looked up within
-          the Oozie configuration directory; the path can also be absolute (i.e. it can point
-          to Hadoop client conf/ directories in the local filesystem).
-      </description>
-    </property>
-    <property>
-        <name>oozie.service.ActionService.executor.ext.classes</name>
-        <value>
-            org.apache.oozie.action.email.EmailActionExecutor,
-            org.apache.oozie.action.hadoop.HiveActionExecutor,
-            org.apache.oozie.action.hadoop.ShellActionExecutor,
-            org.apache.oozie.action.hadoop.SqoopActionExecutor,
-            org.apache.oozie.action.hadoop.DistcpActionExecutor
-        </value>
-    </property>
-
-    <property>
-        <name>oozie.service.SchemaService.wf.ext.schemas</name>
-        <value>shell-action-0.1.xsd,email-action-0.1.xsd,hive-action-0.2.xsd,sqoop-action-0.2.xsd,ssh-action-0.1.xsd,distcp-action-0.1.xsd,shell-action-0.2.xsd,oozie-sla-0.1.xsd,oozie-sla-0.2.xsd</value>
-    </property>
-    <property>
-        <name>oozie.service.JPAService.create.db.schema</name>
-        <value>false</value>
-        <description>
-            Creates Oozie DB.
-
-            If set to true, it creates the DB schema if it does not exist; if the DB schema exists, this is a NOP.
-            If set to false, it does not create the DB schema; if the DB schema does not exist, startup fails.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.driver</name>
-        <value>org.apache.derby.jdbc.EmbeddedDriver</value>
-        <description>
-            JDBC driver class.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.url</name>
-        <value>jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true</value>
-        <description>
-            JDBC URL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.username</name>
-        <value>oozie</value>
-        <description>
-          Database user name to use to connect to the database
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.jdbc.password</name>
-        <value> </value>
-        <description>
-            DB user password.
-
-            IMPORTANT: if the password is empty, leave a one-space string; the service trims the value,
-                       and if it is empty, Configuration assumes it is NULL.
-        </description>
-    </property>
-
-    <property>
-        <name>oozie.service.JPAService.pool.max.active.conn</name>
-        <value>10</value>
-        <description>
-             Max number of connections.
-        </description>
-    </property>
-
-    <property>
-      <name>oozie.services</name>
-      <value>
-        org.apache.oozie.service.SchedulerService,
-        org.apache.oozie.service.InstrumentationService,
-        org.apache.oozie.service.CallableQueueService,
-        org.apache.oozie.service.UUIDService,
-        org.apache.oozie.service.ELService,
-        org.apache.oozie.service.AuthorizationService,
-        org.apache.oozie.service.UserGroupInformationService,
-        org.apache.oozie.service.HadoopAccessorService,
-        org.apache.oozie.service.URIHandlerService,
-        org.apache.oozie.service.MemoryLocksService,
-        org.apache.oozie.service.DagXLogInfoService,
-        org.apache.oozie.service.SchemaService,
-        org.apache.oozie.service.LiteWorkflowAppService,
-        org.apache.oozie.service.JPAService,
-        org.apache.oozie.service.StoreService,
-        org.apache.oozie.service.CoordinatorStoreService,
-        org.apache.oozie.service.SLAStoreService,
-        org.apache.oozie.service.DBLiteWorkflowStoreService,
-        org.apache.oozie.service.CallbackService,
-        org.apache.oozie.service.ActionService,
-        org.apache.oozie.service.ActionCheckerService,
-        org.apache.oozie.service.RecoveryService,
-        org.apache.oozie.service.PurgeService,
-        org.apache.oozie.service.CoordinatorEngineService,
-        org.apache.oozie.service.BundleEngineService,
-        org.apache.oozie.service.DagEngineService,
-        org.apache.oozie.service.CoordMaterializeTriggerService,
-        org.apache.oozie.service.StatusTransitService,
-        org.apache.oozie.service.PauseTransitService,
-        org.apache.oozie.service.GroupsService,
-        org.apache.oozie.service.ProxyUserService
-      </value>
-      <description>List of Oozie services</description>
-    </property>
-    <property>
-      <name>oozie.service.URIHandlerService.uri.handlers</name>
-      <value>org.apache.oozie.dependency.FSURIHandler,org.apache.oozie.dependency.HCatURIHandler</value>
-      <description>
-        Enlist the different uri handlers supported for data availability checks.
-      </description>
-    </property>
-    <property>
-    <name>oozie.services.ext</name>
-    <value>org.apache.oozie.service.PartitionDependencyManagerService,org.apache.oozie.service.HCatAccessorService</value>
-    <description>
-       To add/replace services defined in 'oozie.services' with custom implementations.
-       Class names must be separated by commas.
-    </description>
-    </property>
-    <property>
-    <name>oozie.service.coord.push.check.requeue.interval</name>
-    <value>30000</value>
-    <description>
-        Command re-queue interval for push dependencies (in millisecond).
-    </description>
-    </property>
-    <property>
-      <name>oozie.credentials.credentialclasses</name>
-      <value>hcat=org.apache.oozie.action.hadoop.HCatCredentials</value>
-      <description>
-        Credential Class to be used for HCat.
-      </description>
-    </property>
-
-</configuration>
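
Several values in oozie-site.xml reference other properties with the ${...} syntax, e.g. the Derby JDBC URL built from oozie.data.dir and oozie.db.schema.name. A hedged Python sketch of that expansion, with assumed values for the referenced properties:

    import re

    props = {
        "oozie.data.dir": "/hadoop/oozie/data",   # assumed; set elsewhere by Ambari
        "oozie.db.schema.name": "oozie",
    }
    jdbc_url = "jdbc:derby:${oozie.data.dir}/${oozie.db.schema.name}-db;create=true"

    def resolve(value, props):
        # Expand ${property} references until nothing changes.
        pattern = re.compile(r"\$\{([^}]+)\}")
        while True:
            expanded = pattern.sub(lambda m: props.get(m.group(1), m.group(0)), value)
            if expanded == value:
                return expanded
            value = expanded

    print(resolve(jdbc_url, props))
    # -> jdbc:derby:/hadoop/oozie/data/oozie-db;create=true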

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/metainfo.xml
deleted file mode 100644
index 79e3501..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/metainfo.xml
+++ /dev/null
@@ -1,84 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>OOZIE</name>
-      <comment>System for workflow coordination and execution of Apache Hadoop jobs.  This also includes the installation of the optional Oozie Web Console which relies on and will install the &lt;a target="_blank" href="http://www.sencha.com/products/extjs/license/"&gt;ExtJS&lt;/a&gt; Library.
-      </comment>
-      <version>4.0.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>OOZIE_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/oozie_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>OOZIE_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/oozie_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>oozie.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>oozie-client.noarch</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>extjs-2.2-1</name>
-            </package>
-            <!--TODO: uncomment this after the package becomes available in the repo-->
-            <!--<package>-->
-              <!--<type>rpm</type>-->
-              <!--<name>falcon-0.4.0.2.0.6.0-76</name>-->
-            <!--</package>-->
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>oozie-site</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>
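
metainfo.xml declares each component together with the Python command script Ambari runs for it. A standalone sketch (not Ambari code) that parses a metainfo.xml like the one above and lists the components with their scripts:

    import xml.etree.ElementTree as ET

    tree = ET.parse("metainfo.xml")   # assumed local copy of the file above
    for component in tree.getroot().iter("component"):
        name = component.findtext("name")
        category = component.findtext("category")
        script = component.findtext("commandScript/script")
        print("%s (%s): %s" % (name, category, script))
    # OOZIE_SERVER (MASTER): scripts/oozie_server.py
    # OOZIE_CLIENT (CLIENT): scripts/oozie_client.py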

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/oozieSmoke2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/oozieSmoke2.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/oozieSmoke2.sh
deleted file mode 100644
index 2cb5a7a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/oozieSmoke2.sh
+++ /dev/null
@@ -1,95 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-function getValueFromField {
-  xmllint $1 | grep "<name>$2</name>" -C 2 | grep '<value>' | cut -d ">" -f2 | cut -d "<" -f1
-  return $?
-}
-
-function checkOozieJobStatus {
-  local job_id=$1
-  local num_of_tries=$2
-  #default num_of_tries to 10 if not present
-  num_of_tries=${num_of_tries:-10}
-  local i=0
-  local rc=1
-  local cmd="source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie job -oozie ${OOZIE_SERVER} -info $job_id"
-  su - ${smoke_test_user} -c "$cmd"
-  while [ $i -lt $num_of_tries ] ; do
-    cmd_output=`su - ${smoke_test_user} -c "$cmd"`
-    (IFS='';echo $cmd_output)
-    act_status=$(IFS='';echo $cmd_output | grep ^Status | cut -d':' -f2 | sed 's| ||g')
-    echo "workflow_status=$act_status"
-    if [ "RUNNING" == "$act_status" ]; then
-      #increment the couner and get the status again after waiting for 15 secs
-      sleep 15
-      (( i++ ))
-      elif [ "SUCCEEDED" == "$act_status" ]; then
-        rc=0;
-        break;
-      else
-        rc=1
-        break;
-      fi
-    done
-    return $rc
-}
-
-export oozie_conf_dir=$1
-export hadoop_conf_dir=$2
-export smoke_test_user=$3
-export security_enabled=$4
-export smoke_user_keytab=$5
-export kinit_path_local=$6
-
-export OOZIE_EXIT_CODE=0
-export JOBTRACKER=`getValueFromField ${hadoop_conf_dir}/yarn-site.xml yarn.resourcemanager.address`
-export NAMENODE=`getValueFromField ${hadoop_conf_dir}/core-site.xml fs.defaultFS`
-export OOZIE_SERVER=`getValueFromField ${oozie_conf_dir}/oozie-site.xml oozie.base.url | tr '[:upper:]' '[:lower:]'`
-export OOZIE_EXAMPLES_DIR=`rpm -ql oozie-client | grep 'oozie-examples.tar.gz$' | xargs dirname`
-cd $OOZIE_EXAMPLES_DIR
-
-tar -zxf oozie-examples.tar.gz
-sed -i "s|nameNode=hdfs://localhost:8020|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|nameNode=hdfs://localhost:9000|nameNode=$NAMENODE|g"  examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8021|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:9001|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|jobTracker=localhost:8032|jobTracker=$JOBTRACKER|g" examples/apps/map-reduce/job.properties
-sed -i "s|oozie.wf.application.path=hdfs://localhost:9000|oozie.wf.application.path=$NAMENODE|g" examples/apps/map-reduce/job.properties
-
-if [[ $security_enabled == "true" ]]; then
-  kinitcmd="${kinit_path_local} -kt ${smoke_user_keytab} ${smoke_test_user}; "
-else 
-  kinitcmd=""
-fi
-
-su - ${smoke_test_user} -c "hdfs dfs -rm -r examples"
-su - ${smoke_test_user} -c "hdfs dfs -rm -r input-data"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples examples"
-su - ${smoke_test_user} -c "hdfs dfs -copyFromLocal $OOZIE_EXAMPLES_DIR/examples/input-data input-data"
-
-cmd="${kinitcmd}source ${oozie_conf_dir}/oozie-env.sh ; /usr/bin/oozie -Doozie.auth.token.cache=false job -oozie $OOZIE_SERVER -config $OOZIE_EXAMPLES_DIR/examples/apps/map-reduce/job.properties  -run"
-echo $cmd
-job_info=`su - ${smoke_test_user} -c "$cmd" | grep "job:"`
-job_id="`echo $job_info | cut -d':' -f2`"
-checkOozieJobStatus "$job_id"
-OOZIE_EXIT_CODE="$?"
-exit $OOZIE_EXIT_CODE
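
getValueFromField in the smoke test above extracts a single property value from a Hadoop *-site.xml file by piping xmllint through grep and cut. An illustrative Python equivalent under the same assumptions:

    import xml.etree.ElementTree as ET

    def get_value_from_field(xml_path, property_name):
        # Return the <value> of the <property> whose <name> matches, or None.
        root = ET.parse(xml_path).getroot()
        for prop in root.iter("property"):
            if prop.findtext("name") == property_name:
                return prop.findtext("value")
        return None

    # e.g. get_value_from_field("/etc/hadoop/conf/core-site.xml", "fs.defaultFS")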

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/wrap_ooziedb.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/wrap_ooziedb.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/wrap_ooziedb.sh
deleted file mode 100644
index 97a513c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/files/wrap_ooziedb.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-OUT=`cd /var/tmp/oozie && /usr/lib/oozie/bin/ooziedb.sh "$@" 2>&1`
-EC=$?
-echo $OUT
-GRVAR=`echo ${OUT} | grep -o "java.lang.Exception: DB schema exists"`
-if [ ${EC} -ne 0 ] && [ -n "$GRVAR" ]
-then
-  exit 0
-else
-  exit $EC
-fi  
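
wrap_ooziedb.sh runs ooziedb.sh and treats a failure whose output contains "java.lang.Exception: DB schema exists" as success, so repeated schema creation stays idempotent. A minimal Python sketch of the same idea; the arguments shown are the schema-creation call used elsewhere in these scripts:

    import subprocess
    import sys

    proc = subprocess.run(
        ["/usr/lib/oozie/bin/ooziedb.sh", "create", "-sqlfile", "oozie.sql", "-run"],
        cwd="/var/tmp/oozie", capture_output=True, text=True)
    output = proc.stdout + proc.stderr
    print(output)
    if proc.returncode != 0 and "DB schema exists" in output:
        sys.exit(0)   # already initialized: not an error
    sys.exit(proc.returncode)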

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie.py
deleted file mode 100644
index 1422d1e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def oozie(is_server=False): # TODO: see if we can remove this
-  import params
-  #TODO hack for falcon el
-  oozie_site = dict(params.config['configurations']['oozie-site'])
-  oozie_site["oozie.services.ext"] = 'org.apache.oozie.service.JMSAccessorService,' + oozie_site["oozie.services.ext"]
-  XmlConfig( "oozie-site.xml",
-    conf_dir = params.conf_dir, 
-    configurations = oozie_site,
-    owner = params.oozie_user,
-    group = params.user_group,
-    mode = 0664
-  )
-  Directory( params.conf_dir,
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-  TemplateConfig( format("{conf_dir}/oozie-env.sh"),
-    owner = params.oozie_user
-  )
-  
-  TemplateConfig( format("{conf_dir}/oozie-log4j.properties"),
-    owner = params.oozie_user
-  )
-
-  if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-    Execute(format("/bin/sh -c 'cd /usr/lib/ambari-agent/ &&\
-    curl -kf --retry 5 {jdk_location}{check_db_connection_jar_name}\
-     -o {check_db_connection_jar_name}'"),
-      not_if  = format("[ -f {check_db_connection_jar} ]")
-    )
-    
-  oozie_ownership( )
-  
-  if is_server:      
-    oozie_server_specific( )
-  
-def oozie_ownership():
-  import params
-  
-  File ( format("{conf_dir}/adminusers.txt"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/hadoop-config.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/oozie-default.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  Directory ( format("{conf_dir}/action-conf"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-
-  File ( format("{conf_dir}/action-conf/hive.xml"),
-    owner = params.oozie_user,
-    group = params.user_group
-  )
-  
-def oozie_server_specific():
-  import params
-  
-  oozie_server_directories = [params.oozie_pid_dir, params.oozie_log_dir, params.oozie_tmp_dir, params.oozie_data_dir, params.oozie_lib_dir, params.oozie_webapps_dir]
-  Directory( oozie_server_directories,
-    owner = params.oozie_user,
-    mode = 0755,
-    recursive = True
-  )
-       
-  cmd1 = "cd /usr/lib/oozie && tar -xvf oozie-sharelib.tar.gz"
-  cmd2 =  format("cd /usr/lib/oozie && mkdir -p {oozie_tmp_dir}")
-  
-  # this is different for HDP1
-  cmd3 = format("cd /usr/lib/oozie && chown {oozie_user}:{user_group} {oozie_tmp_dir} && mkdir -p {oozie_libext_dir} && cp {ext_js_path} {oozie_libext_dir}")
-  if params.jdbc_driver_name=="com.mysql.jdbc.Driver" or params.jdbc_driver_name=="oracle.jdbc.driver.OracleDriver":
-    cmd3 += format(" && cp {jdbc_driver_jar} {oozie_libext_dir}")
-  #falcon el extension
-  if params.has_falcon_host:
-    Execute(format('cp {falcon_home}/oozie/ext/falcon-oozie-el-extension-0.4.0.2.0.6.0-76.jar {oozie_libext_dir}'))
-  # this is different for HDP1
-  cmd4 = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-setup.sh prepare-war")
-  
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  Execute( [cmd1, cmd2, cmd3],
-    not_if  = no_op_test
-  )
-  Execute( cmd4,
-    user = params.oozie_user,
-    not_if  = no_op_test
-  )
-  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_client.py
deleted file mode 100644
index 1d5db39..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_client.py
+++ /dev/null
@@ -1,33 +0,0 @@
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=False)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieClient().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_server.py
deleted file mode 100644
index 6c00738..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_server.py
+++ /dev/null
@@ -1,47 +0,0 @@
-import sys
-from resource_management import *
-
-from oozie import oozie
-from oozie_service import oozie_service
-
-         
-class OozieServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-    
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    oozie(is_server=True)
-    
-  def start(self, env):
-    import params
-    env.set_params(params)
-    #TODO remove this when the config command is implemented
-    self.configure(env)
-    oozie_service(action='start')
-    
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    oozie_service(action='stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.pid_file)
-    
-def main():
-  command_type = sys.argv[1] if len(sys.argv)>1 else "start"
-  print "Running "+command_type
-  command_data_file = '/root/workspace/Oozie/input.json'
-  basedir = '/root/workspace/Oozie/main'
-  sys.argv = ["", command_type, command_data_file, basedir]
-  
-  OozieServer().execute()
-  
-if __name__ == "__main__":
-  #main()
-  OozieServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_service.py
deleted file mode 100644
index e9edcc9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/OOZIE/package/scripts/oozie_service.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-def oozie_service(action = 'start'): # 'start' or 'stop'
-  import params
-
-  kinit_if_needed = format("{kinit_path_local} -kt {oozie_keytab} {oozie_principal};") if params.security_enabled else ""
-  no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-  
-  if action == 'start':
-    start_cmd = format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-start.sh")
-    
-    if params.jdbc_driver_name == "com.mysql.jdbc.Driver" or params.jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
-      db_connection_check_command = format("{java_home}/bin/java -cp {check_db_connection_jar}:{jdbc_driver_jar} org.apache.ambari.server.DBConnectionVerification {oozie_jdbc_connection_url} {oozie_metastore_user_name} {oozie_metastore_user_passwd} {jdbc_driver_name}")
-    else:
-      db_connection_check_command = None
-      
-    cmd1 =  format("cd {oozie_tmp_dir} && /usr/lib/oozie/bin/ooziedb.sh create -sqlfile oozie.sql -run")
-    cmd2 =  format("{kinit_if_needed} hadoop dfs -put /usr/lib/oozie/share {oozie_hdfs_user_dir} ; hadoop dfs -chmod -R 755 {oozie_hdfs_user_dir}/share")
-      
-    if db_connection_check_command:
-      Execute( db_connection_check_command)
-                  
-    Execute( cmd1,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-      ignore_failures = True
-    ) 
-    
-    Execute( cmd2,
-      user = params.oozie_user,       
-      not_if = format("{kinit_if_needed} hadoop dfs -ls /user/oozie/share | awk 'BEGIN {{count=0;}} /share/ {{count++}} END {{if (count > 0) {{exit 0}} else {{exit 1}}}}'")
-    )
-    
-    Execute( start_cmd,
-      user = params.oozie_user,
-      not_if  = no_op_test,
-    )
-  elif action == 'stop':
-    stop_cmd  = format("su - {oozie_user} -c  'cd {oozie_tmp_dir} && /usr/lib/oozie/bin/oozie-stop.sh' && rm -f {pid_file}")
-    Execute( stop_cmd,
-      only_if  = no_op_test
-    )
-
-  
-  
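
oozie_service() above guards its start and stop commands with a "no_op_test" that checks whether the pid file points at a live process. A hedged Python sketch of that liveness check (the pid file path here is an assumed example; the real one comes from status_params):

    import os

    def oozie_is_running(pid_file="/var/run/oozie/oozie.pid"):   # assumed path
        # True only if the pid file exists and names a live process.
        try:
            with open(pid_file) as fh:
                pid = int(fh.read().strip())
            os.kill(pid, 0)   # signal 0 checks existence without sending anything
            return True
        except (FileNotFoundError, ProcessLookupError, ValueError):
            return False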


[10/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json
new file mode 100644
index 0000000..f33a0c0
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HDFS/metrics.json
@@ -0,0 +1,7800 @@
+{
+  "NAMENODE": {
+    "Component": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM":{
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Safemode": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DecomNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Free": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Total": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryMax": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingDeletionBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/LiveNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/DeadNodes": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/HeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.FilesTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ExcessBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/NonHeapMemoryMax": {
+            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ScheduledReplicationBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityTotal": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemainingGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Threads": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NameDirStatuses": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/CapacityUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logError": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "ServiceComponentInfo/Version": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/NonDfsUsedSpace": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalBlocks": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityNonDFSUsed",
+            "pointInTime": true,
+            "temporal": false
+          }
+        }
+      }
+    ],
+    "HostComponent": [
+      {
+        "type": "ganglia",
+        "metrics": {
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "dfs.FSNamesystem.TotalLoad",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "dfs.FSNamesystem.BlockCapacity",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "dfs.namenode.GetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesAppended": {
+            "metric": "dfs.namenode.FilesAppended",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/load/load_one": {
+            "metric": "load_one",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "dfs.FSNamesystem.CapacityTotalGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_run": {
+            "metric": "proc_run",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "dfs.FSNamesystem.CapacityUsedGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "dfs.namenode.AddBlockOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/swap_total": {
+            "metric": "swap_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesDeleted": {
+            "metric": "dfs.namenode.FilesDeleted",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "dfs.namenode.SyncsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "jvm.JvmMetrics.ThreadsBlocked",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "rpc.rpc.RpcQueueTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/process/proc_total": {
+            "metric": "proc_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "dfs.namenode.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/part_max_used": {
+            "metric": "part_max_used",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginSuccess_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_idle": {
+            "metric": "cpu_idle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_aidle": {
+            "metric": "cpu_aidle",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_free": {
+            "metric": "mem_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/versionRequest_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesCreated": {
+            "metric": "dfs.namenode.FilesCreated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/bytes_in": {
+            "metric": "bytes_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/network/pkts_out": {
+            "metric": "pkts_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/memNonHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_cached": {
+            "metric": "mem_cached",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/disk/disk_total": {
+            "metric": "disk_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setPermission_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesRenamed": {
+            "metric": "dfs.namenode.FilesRenamed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
+            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_num_ops": {
+            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/GetBlockLocations": {
+            "metric": "dfs.namenode.GetBlockLocations",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/fsync_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_wio": {
+            "metric": "cpu_wio",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_speed": {
+            "metric": "cpu_speed",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FileInfoOps": {
+            "metric": "dfs.namenode.FileInfoOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/DeleteFileOps": {
+            "metric": "dfs.namenode.DeleteFileOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_avg_time": {
+            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setSafeMode_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
+            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthenticationFailures": {
+            "metric": "rpc.rpc.RpcAuthenticationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/pkts_in": {
+            "metric": "pkts_in",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/mem_total": {
+            "metric": "mem_total",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getEditLogSize_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapCommittedM": {
+            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/FilesInGetListingOps": {
+            "metric": "dfs.namenode.FilesInGetListingOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsRunnable": {
+            "metric": "jvm.JvmMetrics.ThreadsRunnable",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/complete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsNew": {
+            "metric": "jvm.JvmMetrics.ThreadsNew",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationFailures": {
+            "metric": "rpc.rpc.RpcAuthorizationFailures",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Syncs_num_ops": {
+            "metric": "dfs.namenode.SyncsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/RpcQueueTime_avg_time": {
+            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReceived_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setReplication_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/SentBytes": {
+            "metric": "rpc.rpc.SentBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/FilesTotal": {
+            "metric": "dfs.FSNamesystem.FilesTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logWarn": {
+            "metric": "jvm.JvmMetrics.LogWarn",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ExcessBlocks": {
+            "metric": "dfs.FSNamesystem.ExcessBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTimedWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/gcCount": {
+            "metric": "jvm.JvmMetrics.GcCount",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/ReceivedBytes": {
+            "metric": "rpc.rpc.ReceivedBytes",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_nice": {
+            "metric": "cpu_nice",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/blockReport_num_ops": {
+            "metric": "dfs.namenode.BlockReportNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/SafemodeTime": {
+            "metric": "dfs.namenode.SafemodeTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollFsImage_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/NumOpenConnections": {
+            "metric": "rpc.rpc.NumOpenConnections",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/memHeapUsedM": {
+            "metric": "jvm.JvmMetrics.MemHeapUsedM",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
+            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsWaiting": {
+            "metric": "jvm.JvmMetrics.ThreadsWaiting",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/disk/disk_free": {
+            "metric": "disk_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/BlocksTotal": {
+            "metric": "dfs.FSNamesystem.BlocksTotal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_buffers": {
+            "metric": "mem_buffers",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/gcTimeMillis": {
+            "metric": "jvm.JvmMetrics.GcTimeMillis",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getBlockLocations_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_num_ops": {
+            "metric": "dfs.namenode.TransactionsNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/create_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/threadsTerminated": {
+            "metric": "jvm.JvmMetrics.ThreadsTerminated",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/network/bytes_out": {
+            "metric": "bytes_out",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_user": {
+            "metric": "cpu_user",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/memory/swap_free": {
+            "metric": "swap_free",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_five": {
+            "metric": "load_five",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_system": {
+            "metric": "cpu_system",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
+            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/Transactions_avg_time": {
+            "metric": "dfs.namenode.TransactionsAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/boottime": {
+            "metric": "boottime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/MissingBlocks": {
+            "metric": "dfs.FSNamesystem.MissingBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpc/callQueueLen": {
+            "metric": "rpc.rpc.CallQueueLength",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/delete_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/CorruptBlocks": {
+            "metric": "dfs.FSNamesystem.CorruptBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rename_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blockReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/mkdirs_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/load/load_fifteen": {
+            "metric": "load_fifteen",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/jvm/logInfo": {
+            "metric": "jvm.JvmMetrics.LogInfo",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/fsImageLoadTime": {
+            "metric": "dfs.namenode.FsImageLoadTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/rollEditLog_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/addBlock_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/setOwner_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/RpcProcessingTime_num_ops": {
+            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/memory/mem_shared": {
+            "metric": "mem_shared",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
+            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/dfs/namenode/CreateFileOps": {
+            "metric": "dfs.namenode.CreateFileOps",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logError": {
+            "metric": "jvm.JvmMetrics.LogError",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/ugi/loginFailure_avg_time": {
+            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/cpu/cpu_num": {
+            "metric": "cpu_num",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/register_num_ops": {
+            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
+            "pointInTime": true,
+            "temporal": true
+          },
+          "metrics/rpc/rpcAuthorizationSuccesses": {
+            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/getListing_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/jvm/logFatal": {
+            "metric": "jvm.JvmMetrics.LogFatal",
+            "pointInTime": false,
+            "temporal": true
+          },
+          "metrics/rpcdetailed/renewLease_avg_time": {
+            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
+            "pointInTime": true,
+            "temporal": true
+          }
+        }
+      },
+      {
+        "type": "jmx",
+        "metrics": {
+          "metrics/dfs/namenode/Used": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/TotalLoad": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memMaxM":{
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
+            "pointInTime" : true,
+            "temporal" : false
+          },
+          "metrics/dfs/FSNamesystem/BlockCapacity": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/TotalFiles": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/GetListingOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/HostName": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/runtime/StartTime": {
+            "metric": "java.lang:type=Runtime.StartTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/UpgradeFinalized": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/fsync_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/ugi/loginSuccess_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/renewLease_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityRemaining": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentRemaining": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/memNonHeapUsedM": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/complete_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getBlockLocations_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
+            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/AddBlockOps": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/Syncs_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.SyncsAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/threadsBlocked": {
+            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/PercentUsed": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpc/RpcQueueTime_num_ops": {
+            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeNumOps",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/dfs/namenode/blockReport_avg_time": {
+            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.BlockReportAvgTime",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryMax": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/jvm/HeapMemoryUsed": {
+            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
+            "pointInTime": true,
+            "temporal": false
+          },
+          "metrics/rpcdetailed/getFileInfo_num_ops": {
+            "m

<TRUNCATED>
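(Aside, not part of the commit: the metrics.json fragments above all share one shape. A component key such as "HBASE_REGIONSERVER" holds a "Component" list, each entry names a source "type" ("ganglia" or "jmx") and a "metrics" map from a property path, e.g. "metrics/rpc/RpcQueueTime_avg_time", to the underlying metric name plus two flags, "pointInTime" and "temporal". The minimal Python sketch below only illustrates how such a definition file could be read and resolved; the file layout is taken straight from the diff, but the load_definitions/resolve helper names are hypothetical and are not Ambari's actual metric-provider API.)

import json

def load_definitions(path):
    # Load a metrics.json of the shape shown in the diff:
    # { "<COMPONENT>": { "Component": [ { "type": "ganglia"|"jmx",
    #                                     "metrics": { property_path: spec, ... } } ] } }
    with open(path) as f:
        return json.load(f)

def resolve(definitions, component, property_path, temporal):
    # Return (source_type, metric_name) for the first source that can serve
    # the request: temporal (time-series) requests need spec["temporal"],
    # point-in-time requests need spec["pointInTime"], mirroring the flags
    # carried by every entry in the diff above. Returns None if no source matches.
    for source in definitions.get(component, {}).get("Component", []):
        spec = source.get("metrics", {}).get(property_path)
        if spec is None:
            continue
        if (temporal and spec.get("temporal")) or (not temporal and spec.get("pointInTime")):
            return source["type"], spec["metric"]
    return None

# Hypothetical usage against a local copy of the HBASE metrics.json removed above:
# defs = load_definitions("metrics.json")
# resolve(defs, "HBASE_REGIONSERVER", "metrics/rpc/RpcQueueTime_avg_time", temporal=True)
# would yield ("ganglia", "rpc.rpc.RpcQueueTimeAvgTime") given the entries shown in the diff.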

[26/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metrics.json
deleted file mode 100644
index 37f73bf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metrics.json
+++ /dev/null
@@ -1,13635 +0,0 @@
-{
-  "HBASE_REGIONSERVER": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/hbase/regionserver/compactionTime_avg_time": {
-            "metric": "hbase.regionserver.compactionTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_num_ops": {
-            "metric": "rpc.rpc.closeRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "regionserver.Server.mutationsWithoutWALSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_num_ops": {
-            "metric": "rpc.rpc.unassign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_num_ops": {
-            "metric": "rpc.rpc.modifyTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion_avg_time": {
-            "metric": "rpc.rpc.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore_num_ops": {
-            "metric": "rpc.rpc.getClosestRowBefore_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowAppendCount": {
-            "metric": "regionserver.Server.slowAppendCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow_num_ops": {
-            "metric": "rpc.rpc.lockRow_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion_avg_time": {
-            "metric": "rpc.rpc.flushRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stopMaster_num_ops": {
-            "metric": "rpc.rpc.stopMaster_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balance_avg_time": {
-            "metric": "rpc.rpc.balance_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyColumn_avg_time": {
-            "metric": "rpc.rpc.modifyColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.multi.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/rootIndexSizeKB": {
-            "metric": "hbase.regionserver.rootIndexSizeKB",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper_num_ops": {
-            "metric": "rpc.rpc.getZooKeeper_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheCount": {
-            "metric": "regionserver.Server.blockCacheCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion_num_ops": {
-            "metric": "rpc.rpc.flushRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.putRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.getRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/get_num_ops": {
-            "metric": "rpc.rpc.get_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stopMaster_avg_time": {
-            "metric": "rpc.rpc.stopMaster_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.removeFromOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/ping_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.ping_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openScanner.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo_avg_time": {
-            "metric": "rpc.rpc.getRegionInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow_avg_time": {
-            "metric": "rpc.rpc.lockRow_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/commitPending_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.commitPending_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME_num_ops": {
-            "metric": "rpc.rpc.checkOOME_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/reportRSFatalError_num_ops": {
-            "metric": "rpc.rpc.reportRSFatalError_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getConfiguration.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/reportRSFatalError_avg_time": {
-            "metric": "rpc.rpc.reportRSFatalError_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.unlockRow.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_min": {
-            "metric": "regionserver.Server.Delete_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClusterStatus_num_ops": {
-            "metric": "rpc.rpc.getClusterStatus_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHTableDescriptors_avg_time": {
-            "metric": "rpc.rpc.getHTableDescriptors_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.rpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteColumn_num_ops": {
-            "metric": "rpc.rpc.deleteColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.delete.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment_num_ops": {
-            "metric": "rpc.rpc.increment_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getMapCompletionEvents_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.stop.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyColumn_num_ops": {
-            "metric": "rpc.rpc.modifyColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME_avg_time": {
-            "metric": "rpc.rpc.checkOOME_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.next.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcSlowResponse_avg_time": {
-            "metric": "rpc.rpc.RpcSlowResponse_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration_avg_time": {
-            "metric": "rpc.rpc.getConfiguration_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getServerName.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_avg_time": {
-            "metric": "rpc.rpc.unassign_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.delete.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/canCommit_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.canCommit_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.multi.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_75th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_75th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Delete_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion_avg_time": {
-            "metric": "rpc.rpc.compactRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_num_ops": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/writeRequestsCount": {
-            "metric": "regionserver.Server.writeRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor_num_ops": {
-            "metric": "rpc.rpc.execCoprocessor_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/canCommit_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.canCommit_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_min": {
-            "metric": "regionserver.Server.Get_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue_avg_time": {
-            "metric": "rpc.rpc.incrementColumnValue_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteTable_num_ops": {
-            "metric": "rpc.rpc.deleteTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries_num_ops": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Mutate_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitCount": {
-            "metric": "regionserver.Server.blockCacheHitCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/exists_avg_time": {
-            "metric": "rpc.rpc.exists_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowPutCount": {
-            "metric": "regionserver.Server.slowPutCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatency_num_ops": {
-            "metric": "hbase.regionserver.fsWriteLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.exists.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete_num_ops": {
-            "metric": "rpc.rpc.delete_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists_num_ops": {
-            "metric": "rpc.rpc.exists_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerStartup_avg_time": {
-            "metric": "rpc.rpc.regionServerStartup_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete_num_ops": {
-            "metric": "rpc.rpc.checkAndDelete_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_avg_time": {
-            "metric": "rpc.rpc.closeRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocalPathInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature_avg_time": {
-            "metric": "rpc.rpc.getProtocolSignature_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/assign_avg_time": {
-            "metric": "rpc.rpc.assign_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionSize_num_ops": {
-            "metric": "hbase.regionserver.compactionSize_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close_avg_time": {
-            "metric": "rpc.rpc.close_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheSize": {
-            "metric": "regionserver.Server.blockCacheSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Mutate_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo_num_ops": {
-            "metric": "rpc.rpc.getHServerInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop_avg_time": {
-            "metric": "rpc.rpc.stop_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped_num_ops": {
-            "metric": "rpc.rpc.isStopped_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_median": {
-            "metric": "regionserver.Server.Mutate_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_num_ops": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_median": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_median",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isMasterRunning_avg_time": {
-            "metric": "rpc.rpc.isMasterRunning_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue_num_ops": {
-            "metric": "rpc.rpc.incrementColumnValue_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_std_dev": {
-            "metric": "hbase.regionserver.deleteRequestLatency_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/hdfsBlocksLocalityIndex": {
-            "metric": "hbase.regionserver.hdfsBlocksLocalityIndex",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/readRequestsCount": {
-            "metric": "regionserver.Server.readRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_min": {
-            "metric": "regionserver.Server.Mutate_min",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/storefileIndexSizeMB": {
-            "metric": "regionserver.Server.storeFileIndexSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/assign_num_ops": {
-            "metric": "rpc.rpc.assign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.close.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_median": {
-            "metric": "regionserver.Server.Delete_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/enableTable_avg_time": {
-            "metric": "rpc.rpc.enableTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_mean": {
-            "metric": "regionserver.Server.Mutate_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close_num_ops": {
-            "metric": "rpc.rpc.close_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getConfiguration.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/done_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.done_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionSize_avg_time": {
-            "metric": "hbase.regionserver.compactionSize_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.getFromOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_min": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_min",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.increment.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteTable_avg_time": {
-            "metric": "rpc.rpc.deleteTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.put.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/delete_avg_time": {
-            "metric": "rpc.rpc.delete_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/statusUpdate_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.statusUpdate_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.compactRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.rpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openScanner.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClusterStatus_avg_time": {
-            "metric": "rpc.rpc.getClusterStatus_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.unlockRow.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.removeFromOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.put.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_avg_time": {
-            "metric": "rpc.rpc.modifyTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut_avg_time": {
-            "metric": "rpc.rpc.checkAndPut_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.isStopped.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put_avg_time": {
-            "metric": "rpc.rpc.put_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitRatio": {
-            "metric": "hbase.regionserver.blockCacheHitRatio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/createTable_avg_time": {
-            "metric": "rpc.rpc.createTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getRegionInfo.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_std_dev": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHTableDescriptors_num_ops": {
-            "metric": "rpc.rpc.getHTableDescriptors_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getAlterStatus_avg_time": {
-            "metric": "rpc.rpc.getAlterStatus_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getRegionInfo_num_ops": {
-            "metric": "rpc.rpc.getRegionInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/statusUpdate_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.statusUpdate_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion_num_ops": {
-            "metric": "rpc.rpc.compactRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted_num_ops": {
-            "metric": "rpc.rpc.isAborted_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getFromOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_max": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheEvictedCount": {
-            "metric": "regionserver.Server.blockCacheEvictionCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkOOME.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/disableTable_num_ops": {
-            "metric": "rpc.rpc.disableTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner_num_ops": {
-            "metric": "rpc.rpc.openScanner_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.bulkLoadHFiles.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerReport_num_ops": {
-            "metric": "rpc.rpc.regionServerReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions_avg_time": {
-            "metric": "rpc.rpc.openRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/exists/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.exists.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Mutate_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.execCoprocessor.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isMasterRunning_num_ops": {
-            "metric": "rpc.rpc.isMasterRunning_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balanceSwitch_num_ops": {
-            "metric": "rpc.rpc.balanceSwitch_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/offline_num_ops": {
-            "metric": "rpc.rpc.offline_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_max": {
-            "metric": "regionserver.Server.Get_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort_num_ops": {
-            "metric": "rpc.rpc.abort_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_95th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_95th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheHitCachingRatio": {
-            "metric": "hbase.regionserver.blockCacheHitCachingRatio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter_num_ops": {
-            "metric": "rpc.rpc.rollHLogWriter_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegions_num_ops": {
-            "metric": "rpc.rpc.openRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion_avg_time": {
-            "metric": "rpc.rpc.splitRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.closeRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkAndPut.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Get_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_min": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_min",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_std_dev": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_std_dev",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-            "metric": "regionserver.Server.Delete_99th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getHServerInfo.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_max": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_max",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getTask_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getTask_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries_num_ops": {
-            "metric": "rpc.rpc.replicateLogEntries_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi_avg_time": {
-            "metric": "rpc.rpc.multi_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowIncrementCount": {
-            "metric": "regionserver.Server.slowIncrementCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Mutate_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionQueueSize": {
-            "metric": "regionserver.Server.compactionQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker_avg_time": {
-            "metric": "rpc.rpc.getCatalogTracker_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion_num_ops": {
-            "metric": "rpc.rpc.splitRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balance_num_ops": {
-            "metric": "rpc.rpc.balance_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushTime_num_ops": {
-            "metric": "hbase.regionserver.flushTime_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/shutdown_num_ops": {
-            "metric": "rpc.rpc.shutdown_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatency_num_ops": {
-            "metric": "hbase.regionserver.fsReadLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.isAborted.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Get_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getProtocolSignature.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName_avg_time": {
-            "metric": "rpc.rpc.getServerName_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/compactionTime_num_ops": {
-            "metric": "hbase.regionserver.compactionTime_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort_avg_time": {
-            "metric": "rpc.rpc.abort_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getCatalogTracker.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocalPathInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getBlockLocalPathInfo_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/enableTable_num_ops": {
-            "metric": "rpc.rpc.enableTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.lockRow.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/stores": {
-            "metric": "regionserver.Server.storeCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/addColumn_avg_time": {
-            "metric": "rpc.rpc.addColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getZooKeeper.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName_num_ops": {
-            "metric": "rpc.rpc.getServerName_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getServerName/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getServerName.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.rpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.isStopped.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/disableTable_avg_time": {
-            "metric": "rpc.rpc.disableTable_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.rollHLogWriter.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.abort.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion_avg_time": {
-            "metric": "rpc.rpc.openRegion_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerReport_avg_time": {
-            "metric": "rpc.rpc.regionServerReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getAlterStatus_num_ops": {
-            "metric": "rpc.rpc.getAlterStatus_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.flushRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/incrementColumnValue/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.incrementColumnValue.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next_avg_time": {
-            "metric": "rpc.rpc.next_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-            "metric": "regionserver.Server.Get_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles_num_ops": {
-            "metric": "rpc.rpc.bulkLoadHFiles_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/ping_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.ping_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatency_avg_time": {
-            "metric": "hbase.regionserver.fsReadLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushSize_num_ops": {
-            "metric": "hbase.regionserver.flushSize_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/balanceSwitch_avg_time": {
-            "metric": "rpc.rpc.balanceSwitch_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/putRequestLatency_max": {
-            "metric": "regionserver.Server.Mutate_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.openRegion.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/lockRow/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.lockRow.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.callQueueLen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion_num_ops": {
-            "metric": "rpc.rpc.openRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/compactRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.compactRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsSyncLatency_num_ops": {
-            "metric": "hbase.regionserver.fsSyncLatency_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_95th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_95th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.getOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_75th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_75th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/move_num_ops": {
-            "metric": "rpc.rpc.move_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop_num_ops": {
-            "metric": "rpc.rpc.stop_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries_avg_time": {
-            "metric": "rpc.rpc.replicateLogEntries_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_mean": {
-            "metric": "regionserver.Server.Get_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/get_avg_time": {
-            "metric": "rpc.rpc.get_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/multi_num_ops": {
-            "metric": "rpc.rpc.multi_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/next/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.next.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions_avg_time": {
-            "metric": "rpc.rpc.addToOnlineRegions_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/deleteColumn_avg_time": {
-            "metric": "rpc.rpc.deleteColumn_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/regions": {
-            "metric": "regionserver.Server.regionCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/bulkLoadHFiles_avg_time": {
-            "metric": "rpc.rpc.bulkLoadHFiles_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.isAborted.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/stop/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.stop.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.addToOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/abort/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.abort.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/removeFromOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.removeFromOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheFree": {
-            "metric": "regionserver.Server.blockCacheFreeSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/offline_avg_time": {
-            "metric": "rpc.rpc.offline_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow_avg_time": {
-            "metric": "rpc.rpc.unlockRow_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/blockCacheMissCount": {
-            "metric": "regionserver.Server.blockCacheMissCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/getCatalogTracker_num_ops": {
-            "metric": "rpc.rpc.getCatalogTracker_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/checkOOME/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkOOME.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushQueueSize": {
-            "metric": "regionserver.Server.flushQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.checkAndPut.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/close/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.close.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/execCoprocessor_avg_time": {
-            "metric": "rpc.rpc.execCoprocessor_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_mean": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_mean",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/createTable_num_ops": {
-            "metric": "rpc.rpc.createTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getConfiguration_num_ops": {
-            "metric": "rpc.rpc.getConfiguration_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isStopped_avg_time": {
-            "metric": "rpc.rpc.isStopped_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rollHLogWriter_avg_time": {
-            "metric": "rpc.rpc.rollHLogWriter_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsSyncLatency_avg_time": {
-            "metric": "hbase.regionserver.fsSyncLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-            "metric": "regionserver.Server.Delete_mean",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/addToOnlineRegions/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.addToOnlineRegions.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getMapCompletionEvents_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getMapCompletionEvents_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_mean": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_mean",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-            "metric": "regionserver.Server.staticIndexSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.checkAndDelete.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getFromOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.getFromOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-            "metric": "regionserver.Server.mutationsWithoutWALCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/get/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.get.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_median": {
-            "metric": "regionserver.Server.Get_median",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/openScanner_avg_time": {
-            "metric": "rpc.rpc.openScanner_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcSlowResponse_num_ops": {
-            "metric": "rpc.rpc.RpcSlowResponse_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/isAborted_avg_time": {
-            "metric": "rpc.rpc.isAborted_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getClosestRowBefore.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/flushRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.flushRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushSize_avg_time": {
-            "metric": "hbase.regionserver.flushSize_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/commitPending_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.commitPending_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getClosestRowBefore_avg_time": {
-            "metric": "rpc.rpc.getClosestRowBefore_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_max": {
-            "metric": "regionserver.Server.Delete_max",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/get/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.get.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/put_num_ops": {
-            "metric": "rpc.rpc.put_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/move_avg_time": {
-            "metric": "rpc.rpc.move_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/percentFilesLocal": {
-            "metric": "regionserver.Server.percentFilesLocal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatency_avg_time": {
-            "metric": "hbase.regionserver.fsWriteLatency_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.increment.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/openRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.openRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getTask_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getTask_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/addColumn_num_ops": {
-            "metric": "rpc.rpc.addColumn_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/maxMemoryM": {
-            "metric": "jvm.metrics.maxMemoryM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getOnlineRegions_num_ops": {
-            "metric": "rpc.rpc.getOnlineRegions_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/flushTime_avg_time": {
-            "metric": "hbase.regionserver.flushTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/done_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.done_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion_num_ops": {
-            "metric": "rpc.rpc.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/unlockRow_num_ops": {
-            "metric": "rpc.rpc.unlockRow_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowGetCount": {
-            "metric": "regionserver.Server.slowGetCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/replicateLogEntries/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.replicateLogEntries.aboveOneSec._avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/shutdown_avg_time": {
-            "metric": "rpc.rpc.shutdown_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/regionServerStartup_num_ops": {
-            "metric": "rpc.rpc.regionServerStartup_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/requests": {
-            "metric": "regionserver.Server.totalRequestCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_99th_percentile": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_99th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsWriteLatencyHistogram_99th_percentile": {
-            "metric": "hbase.regionserver.fsWriteLatencyHistogram_99th_percentile",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/storefiles": {
-            "metric": "regionserver.Server.storeFileCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/next_num_ops": {
-            "metric": "rpc.rpc.next_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getBlockCacheColumnFamilySummaries_avg_time": {
-            "metric": "rpc.rpc.getBlockCacheColumnFamilySummaries_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/slowDeleteCount": {
-            "metric": "regionserver.Server.slowDeleteCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndDelete_avg_time": {
-            "metric": "rpc.rpc.checkAndDelete_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.closeRegion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getHServerInfo_avg_time": {
-            "metric": "rpc.rpc.getHServerInfo_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getZooKeeper_avg_time": {
-            "metric": "rpc.rpc.getZooKeeper_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/hlogFileCount": {
-            "metric": "hbase.regionserver.hlogFileCount",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Get_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
-            "metric": "regionserver.Server.Delete_95th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/memstoreSizeMB": {
-            "metric": "regionserver.Server.memStoreSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/fsReadLatencyHistogram_median": {
-            "metric": "hbase.regionserver.fsReadLatencyHistogram_median",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolSignature_num_ops": {
-            "metric": "rpc.rpc.getProtocolSignature_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/getProtocolVersion/aboveOneSec/_num_ops": {
-            "metric": "rpc.rpc.getProtocolVersion.aboveOneSec._num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
-            "metric": "regionserver.Server.Delete_75th_percentile",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.rpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
-            "metric": "regionserver.Server.staticBloomSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/checkAndPut_num_ops": {
-            "metric": "rpc.rpc.checkAndPut_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/increment_avg_time": {
-            "metric": "rpc.rpc.increment_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/hbase/regionserver/slowPutCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowPutCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/percentFilesLocal": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.percentFilesLocal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheFree": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheFreeSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheMissCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheMissCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/flushQueueSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.flushQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowAppendCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowAppendCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowIncrementCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowIncrementCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheEvictedCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheEvictionCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/compactionQueueSize": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.compactionQueueLength",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowGetCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowGetCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/readRequestsCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.readRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/storefileIndexSizeMB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileIndexSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/totalStaticIndexSizeKB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticIndexSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_num_ops": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/requests": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/storefiles": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeFileCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.mutationsWithoutWALCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/writeRequestsCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.writeRequestCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_median": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_median",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/slowDeleteCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.slowDeleteCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/stores": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.storeCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_min": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_min",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_95th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_95th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/memstoreSizeMB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.memStoreSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_mean": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_mean",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/deleteRequestLatency_max": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Delete_max",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/putRequestLatency_75th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Mutate_75th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/regions": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.regionCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/totalStaticBloomSizeKB": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.staticBloomSize",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/blockCacheHitCount": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.blockCacheHitCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/hbase/regionserver/getRequestLatency_99th_percentile": {
-            "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.Get_99th_percentile",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/hbase/regionserver/compactionTime_avg_time": {
-            "metric": "hbase.regionserver.compactionTime_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/closeRegion_num_ops": {
-            "metric": "rpc.rpc.closeRegion_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/hbase/regionserver/mutationsWithoutWALSize": {
-            "metric": "regionserver.Server.mutationsWithoutWALSize",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/unassign_num_ops": {
-            "metric": "rpc.rpc.unassign_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/modifyTable_num_ops": {
-            "metric": "rpc.rpc.modifyTable_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/splitRegion/aboveOneSec/_avg_time": {
-            "metric": "rpc.rpc.splitRegion.aboveOneSec._avg_time",
-            "pointInTime":

<TRUNCATED>
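For context, each entry in the metric-mapping JSON above maps an Ambari metric path to a provider-specific metric name, grouped by provider type ("ganglia" or "jmx") under category keys such as "HostComponent". In the fragment shown, the ganglia-sourced entries support temporal (time-series) retrieval, while the jmx-sourced entries resolve to MBean attributes and are point-in-time only. A minimal sketch of that shape, assembled from two entries that appear verbatim above (an illustrative excerpt, not the full file):

          {
            "type": "ganglia",
            "metrics": {
              "metrics/hbase/regionserver/requests": {
                "metric": "regionserver.Server.totalRequestCount",
                "pointInTime": false,
                "temporal": true
              }
            }
          },
          {
            "type": "jmx",
            "metrics": {
              "metrics/hbase/regionserver/requests": {
                "metric": "Hadoop:service=HBase,name=RegionServer,sub=Server.totalRequestCount",
                "pointInTime": true,
                "temporal": false
              }
            }
          }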

[32/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh
new file mode 100644
index 0000000..7fbc4c4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_templeton_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# out='{"status":"ok","version":"v1"}<status_code:200>'
+HOST=$1
+PORT=$2
+VERSION=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then 
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+regex="^.*\"status\":\"ok\".*<status_code:200>$"
+out=`curl --negotiate -u : -s -w '<status_code:%{http_code}>' http://$HOST:$PORT/templeton/$VERSION/status 2>&1`
+if [[ $out =~ $regex ]]; then
+  out=`echo "$out" | sed -e 's/{/[/g' | sed -e 's/}/]/g'` 
+  echo "OK: WebHCat Server status [$out]";
+  exit 0;
+fi
+echo "CRITICAL: Error accessing WebHCat Server, status [$out]";
+exit 2;
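
The check above simply requests http://HOST:PORT/templeton/VERSION/status and reports CRITICAL unless the body contains "status":"ok" and the HTTP code is 200. A minimal Python 2 sketch of the same probe, without the Kerberos/kinit branch (the function name, host and port below are illustrative, not part of the stack):

    import urllib2

    def check_webhcat(host, port, version="v1"):
        # Same endpoint the Nagios plugin curls; Kerberos negotiation is omitted here.
        url = "http://%s:%s/templeton/%s/status" % (host, port, version)
        try:
            body = urllib2.urlopen(url, timeout=10).read()
        except Exception as e:
            return 2, "CRITICAL: Error accessing WebHCat Server [%s]" % e
        if '"status":"ok"' in body:
            return 0, "OK: WebHCat Server status [%s]" % body
        return 2, "CRITICAL: Unexpected WebHCat response [%s]" % body

    print(check_webhcat("webhcat.example.com", "50111"))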

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh
new file mode 100644
index 0000000..b23045e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_webui.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+checkurl () {
+  url=$1
+  curl $url -o /dev/null
+  echo $?
+}
+
+service=$1
+host=$2
+port=$3
+
+if [[ -z "$service" || -z "$host" ]]; then
+  echo "UNKNOWN: Invalid arguments; Usage: check_webui.sh service_name host_name";
+  exit 3;
+fi
+
+case "$service" in
+
+jobtracker) 
+    jtweburl="http://$host:$port"
+    if [[ `checkurl "$jtweburl"` -ne 0 ]]; then 
+      echo "WARNING: Jobtracker web UI not accessible : $jtweburl";
+      exit 1;
+    fi
+    ;;
+namenode)
+    nnweburl="http://$host:$port"
+    if [[ `checkurl "$nnweburl"` -ne 0 ]] ; then 
+      echo "WARNING: NameNode Web UI not accessible : $nnweburl";
+      exit 1;
+    fi
+    ;;
+jobhistory)
+    jhweburl="http://$host:$port/jobhistoryhome.jsp"
+    if [[ `checkurl "$jhweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $jhweburl";
+      exit 1;
+    fi
+    ;;
+hbase)
+    hbaseweburl="http://$host:$port/master-status"
+    if [[ `checkurl "$hbaseweburl"` -ne 0 ]]; then 
+      echo "WARNING: HBase Master Web UI not accessible : $hbaseweburl";
+      exit 1;
+    fi
+    ;;
+resourcemanager)
+    rmweburl="http://$host:$port/cluster"
+    if [[ `checkurl "$rmweburl"` -ne 0 ]]; then 
+      echo "WARNING: ResourceManager Web UI not accessible : $rmweburl";
+      exit 1;
+    fi
+    ;;
+historyserver2)
+    hsweburl="http://$host:$port/jobhistory"
+    if [[ `checkurl "$hsweburl"` -ne 0 ]]; then 
+      echo "WARNING: HistoryServer Web UI not accessible : $hsweburl";
+      exit 1;
+    fi
+    ;;
+*) echo "UNKNOWN: Invalid service name [$service], valid options [jobtracker|jobhistory|hbase|namenode|resourcemanager|historyserver2]"
+   exit 3
+   ;;
+esac
+
+echo "OK: Successfully accessed $service Web UI"
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php
new file mode 100644
index 0000000..487eb43
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/hdp_nagios_init.php
@@ -0,0 +1,81 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* Common functions called from other alerts
+ *
+ */
+ 
+ /*
+ * Function for kinit. If security is enabled and klist shows no existing
+ * ticket for this principal, performs the kinit call.
+ */
+  function kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name) {
+    if($security_enabled === 'true') {
+    
+      $is_logined = is_logined($principal_name);
+      
+      if (!$is_logined)
+        $status = kinit($kinit_path_local, $keytab_path, $principal_name);
+      else
+        $status = array(0, '');
+    } else {
+      $status = array(0, '');
+    }
+  
+    return $status;
+  }
+  
+  
+  /*
+  * Checks whether the user already holds a Kerberos ticket for the principal
+  */
+  function is_logined($principal_name) {
+    $check_cmd = "klist|grep $principal_name 1> /dev/null 2>/dev/null ; [[ $? != 0 ]] && echo 1";
+    $check_output =  shell_exec($check_cmd);
+    
+    if ($check_output)
+      return false;
+    else
+      return true;
+  }
+
+  /*
+  * Runs kinit command.
+  */
+  function kinit($kinit_path_local, $keytab_path, $principal_name) {
+    $init_cmd = "$kinit_path_local -kt $keytab_path $principal_name 2>&1";
+    $kinit_output = shell_exec($init_cmd);
+    if ($kinit_output) 
+      $status = array(1, $kinit_output);
+    else
+      $status = array(0, '');
+      
+    return $status;
+  }
+
+  function logout() {
+    if (shell_exec("rm -f /tmp/krb5cc_".trim(shell_exec('id -u'))) == "" ) 
+      $status = true;
+    else
+      $status = false;
+      
+    return $status;
+  }
+ 
+ ?>
\ No newline at end of file
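
kinit_if_needed() above only obtains a ticket when security is enabled and klist does not already show one for the principal, returning a (status, output) pair. A rough Python equivalent of the same flow; the kinit path, keytab and principal are caller-supplied examples, and the error handling is simplified to the return code:

    import subprocess

    def kinit_if_needed(security_enabled, kinit_path, keytab, principal):
        if security_enabled != 'true':
            return 0, ''
        klist = subprocess.Popen(["klist"], stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        out, _ = klist.communicate()
        if principal in out:
            return 0, ''                                  # ticket already present
        kinit = subprocess.Popen([kinit_path, "-kt", keytab, principal],
                                 stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
        out, _ = kinit.communicate()
        return (1, out) if kinit.returncode != 0 else (0, '')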

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py
new file mode 100644
index 0000000..964225e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/functions.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management.libraries.script.config_dictionary import UnknownConfiguration
+
+def get_port_from_url(address):
+  if not is_empty(address):
+    return address.split(':')[-1]
+  else:
+    return address
+  
+def is_empty(var):
+  return isinstance(var, UnknownConfiguration)
\ No newline at end of file
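
get_port_from_url() above just returns whatever follows the last colon, unless the value is still an UnknownConfiguration placeholder. For example (value is illustrative):

    address = "namenode.example.com:50070"   # e.g. dfs.http.address
    port = address.split(':')[-1]            # -> "50070"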

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py
new file mode 100644
index 0000000..af09e87
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from nagios_server_config import nagios_server_config
+
+def nagios():
+  import params
+
+  File( params.nagios_httpd_config_file,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    content = Template("nagios.conf.j2"),
+    mode   = 0644
+  )
+
+  # enable snmpd
+  Execute( "service snmpd start; chkconfig snmpd on",
+    path = "/usr/local/bin/:/bin/:/sbin/"
+  )
+  
+  Directory( params.conf_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+
+  Directory( [params.plugins_dir, params.nagios_obj_dir])
+
+  Directory( params.nagios_pid_dir,
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755,
+    recursive = True
+  )
+
+  Directory( [params.nagios_var_dir, params.check_result_path, params.nagios_rw_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    recursive = True
+  )
+  
+  Directory( [params.nagios_log_dir, params.nagios_log_archives_dir],
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode = 0755
+  )
+
+  nagios_server_config()
+
+  set_web_permisssions()
+
+  File( format("{conf_dir}/command.cfg"),
+    owner = params.nagios_user,
+    group = params.nagios_group
+  )
+  
+  
+def set_web_permisssions():
+  import params
+
+  cmd = format("{htpasswd_cmd} -c -b  /etc/nagios/htpasswd.users {nagios_web_login} {nagios_web_password}")
+  test = format("grep {nagios_web_login} /etc/nagios/htpasswd.users")
+  Execute( cmd,
+    not_if = test
+  )
+
+  File( "/etc/nagios/htpasswd.users",
+    owner = params.nagios_user,
+    group = params.nagios_group,
+    mode  = 0640
+  )
+
+  if System.get_instance().platform == "suse":
+    command = format("usermod -G {nagios_group} wwwrun")
+  else:
+    command = format("usermod -a -G {nagios_group} apache")
+  
+  Execute( command)
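
The web-permissions step above boils down to: create the htpasswd entry for the Nagios web login only if grep does not already find it, then tighten ownership of the file and add the web server user to the Nagios group. The guard by itself, as a stand-alone sketch (command names and paths follow the stack defaults; the helper name is ours):

    import subprocess

    def ensure_htpasswd(login, password, htpasswd_cmd="htpasswd",
                        users_file="/etc/nagios/htpasswd.users"):
        # Mirrors the Execute(cmd, not_if=grep ...) pattern above.
        if subprocess.call(["grep", "-q", login, users_file]) != 0:
            subprocess.check_call([htpasswd_cmd, "-c", "-b", users_file,
                                   login, password])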

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py
new file mode 100644
index 0000000..02685c7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from nagios import nagios
+from nagios_service import nagios_service
+
+         
+class NagiosServer(Script):
+  def install(self, env):
+    remove_conflicting_packages()
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    nagios()
+
+    
+  def start(self, env):
+    import params
+    env.set_params(params)
+
+    self.configure(env) # re-apply configs in case security was enabled after install
+    nagios_service(action='start')
+
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+    
+    nagios_service(action='stop')
+
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.nagios_pid_file)
+    
+def remove_conflicting_packages():  
+  Package( 'hdp_mon_nagios_addons',
+    action = "remove"
+  )
+
+  Package( 'nagios-plugins',
+    action = "remove"
+  )
+
+  Execute( "rpm -e --allmatches --nopostun nagios",
+    path    = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
+    ignore_failures = True 
+  )
+
+def main():
+  command_type = sys.argv[1] if len(sys.argv)>1 else "install"
+  print "Running "+command_type
+  command_data_file = '/var/lib/ambari-agent/data/command-3.json'
+  basedir = '/root/ambari/ambari-server/src/main/resources/stacks/HDP/2.0._/services/NAGIOS/package'
+  stroutfile = '/1.txt'
+  sys.argv = ["", command_type, command_data_file, basedir, stroutfile]
+  
+  NagiosServer().execute()
+  
+if __name__ == "__main__":
+  #main()
+  NagiosServer().execute()
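
status() above delegates to check_process_status() against the Nagios pid file; conceptually the check is just "does the pid in the file point at a live process". A minimal stand-alone version of that idea (the pid file path is the stack default from status_params.py below, and the function name is ours):

    import os

    def nagios_is_running(pid_file="/var/run/nagios/nagios.pid"):
        try:
            pid = int(open(pid_file).read().strip())
            os.kill(pid, 0)          # signal 0: existence check, no signal delivered
            return True
        except (IOError, ValueError, OSError):
            return False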

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py
new file mode 100644
index 0000000..b3e639c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_server_config.py
@@ -0,0 +1,91 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def nagios_server_config():
+  import params
+  
+  nagios_server_configfile( 'nagios.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'resource.cfg', 
+                            config_dir = params.conf_dir, 
+                            group = params.nagios_group
+  )
+  nagios_server_configfile( 'hadoop-hosts.cfg')
+  nagios_server_configfile( 'hadoop-hostgroups.cfg')
+  nagios_server_configfile( 'hadoop-servicegroups.cfg')
+  nagios_server_configfile( 'hadoop-services.cfg')
+  nagios_server_configfile( 'hadoop-commands.cfg')
+  nagios_server_configfile( 'contacts.cfg')
+  
+  if System.get_instance().platform != "suse":
+    nagios_server_configfile( 'nagios',
+                              config_dir = '/etc/init.d/', 
+                              mode = 0755, 
+                              owner = 'root', 
+                              group = 'root'
+    )
+
+  nagios_server_check( 'check_cpu.pl')
+  nagios_server_check( 'check_datanode_storage.php')
+  nagios_server_check( 'check_aggregate.php')
+  nagios_server_check( 'check_hdfs_blocks.php')
+  nagios_server_check( 'check_hdfs_capacity.php')
+  nagios_server_check( 'check_rpcq_latency.php')
+  nagios_server_check( 'check_webui.sh')
+  nagios_server_check( 'check_name_dir_status.php')
+  nagios_server_check( 'check_oozie_status.sh')
+  nagios_server_check( 'check_templeton_status.sh')
+  nagios_server_check( 'check_hive_metastore_status.sh')
+  nagios_server_check( 'check_hue_status.sh')
+  nagios_server_check( 'check_mapred_local_dir_used.sh')
+  nagios_server_check( 'check_nodemanager_health.sh')
+  nagios_server_check( 'check_namenodes_ha.sh')
+  nagios_server_check( 'hdp_nagios_init.php')
+
+
+def nagios_server_configfile(
+  name,
+  owner = None,
+  group = None,
+  config_dir = None,
+  mode = None
+):
+  import params
+  owner = params.nagios_user if not owner else owner
+  group = params.user_group if not group else group
+  config_dir = params.nagios_obj_dir if not config_dir else config_dir
+  
+  TemplateConfig( format("{config_dir}/{name}"),
+    owner          = owner,
+    group          = group,
+    mode           = mode
+  )
+
+def nagios_server_check(name):
+  File( format("{plugins_dir}/{name}"),
+    content = StaticFile(name), 
+    mode = 0755
+  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py
new file mode 100644
index 0000000..cc411b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/nagios_service.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def nagios_service(action='start'): # start or stop
+  import params
+
+  if action == 'start':
+    command = "service nagios start"
+  elif action == 'stop':
+    command = format("service nagios stop && rm -f {nagios_pid_file}")
+
+  Execute( command,
+     path    = "/usr/local/bin/:/bin/:/sbin/"      
+  )
+  MonitorWebserver("restart")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py
new file mode 100644
index 0000000..8694dff
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/params.py
@@ -0,0 +1,168 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+from functions import get_port_from_url
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/nagios"
+nagios_var_dir = "/var/nagios"
+nagios_rw_dir = "/var/nagios/rw"
+plugins_dir = "/usr/lib64/nagios/plugins"
+nagios_obj_dir = "/etc/nagios/objects"
+check_result_path = "/var/nagios/spool/checkresults"
+nagios_httpd_config_file = format("/etc/httpd/conf.d/nagios.conf")
+nagios_log_dir = "/var/log/nagios"
+nagios_log_archives_dir = format("{nagios_log_dir}/archives")
+nagios_host_cfg = format("{nagios_obj_dir}/hadoop-hosts.cfg")
+nagios_lookup_daemon_str = "/usr/sbin/nagios"
+nagios_pid_dir = status_params.nagios_pid_dir
+nagios_pid_file = status_params.nagios_pid_file
+nagios_resource_cfg = format("{conf_dir}/resource.cfg")
+nagios_hostgroup_cfg = format("{nagios_obj_dir}/hadoop-hostgroups.cfg")
+nagios_servicegroup_cfg = format("{nagios_obj_dir}/hadoop-servicegroups.cfg")
+nagios_service_cfg = format("{nagios_obj_dir}/hadoop-services.cfg")
+nagios_command_cfg = format("{nagios_obj_dir}/hadoop-commands.cfg")
+eventhandlers_dir = "/usr/lib/nagios/eventhandlers"
+nagios_principal_name = default("nagios_principal_name", "nagios")
+hadoop_ssl_enabled = False
+
+namenode_metadata_port = "8020"
+oozie_server_port = "11000"
+# different to HDP2    
+namenode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.http.address'])
+# different to HDP2  
+snamenode_port = get_port_from_url(config['configurations']['hdfs-site']["dfs.secondary.http.address"])
+
+hbase_master_rpc_port = "60000"
+rm_port = get_port_from_url(config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'])
+nm_port = "8042"
+hs_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
+journalnode_port = get_port_from_url(config['configurations']['hdfs-site']['dfs.journalnode.http-address'])
+datanode_port = config['configurations']['hdfs-site']['ambari.dfs.datanode.http.port']
+flume_port = "4159"
+hive_metastore_port = config['configurations']['global']['hive_metastore_port'] #"9083"
+templeton_port = config['configurations']['webhcat-site']['templeton.port'] #"50111"
+hbase_rs_port = "60030"
+
+# these 4 are different for HDP2
+jtnode_port = get_port_from_url(config['configurations']['mapred-site']['mapred.job.tracker.http.address'])
+jobhistory_port = get_port_from_url(config['configurations']['mapred-site']['mapreduce.history.server.http.address'])
+tasktracker_port = "50060"
+mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
+
+# this is different for HDP2
+nn_metrics_property = "FSNamesystemMetrics"
+clientPort = config['configurations']['global']['clientPort'] #ZK 
+
+
+java64_home = config['hostLevelParams']['java_home']
+security_enabled = config['configurations']['global']['security_enabled']
+
+nagios_keytab_path = default("nagios_keytab_path", "/etc/security/keytabs/nagios.service.keytab")
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+ganglia_port = "8651"
+ganglia_collector_slaves_port = "8660"
+ganglia_collector_namenode_port = "8661"
+ganglia_collector_jobtracker_port = "8662"
+ganglia_collector_hbase_port = "8663"
+ganglia_collector_rm_port = "8664"
+ganglia_collector_nm_port = "8660"
+ganglia_collector_hs_port = "8666"
+  
+all_ping_ports = config['clusterHostInfo']['all_ping_ports']
+
+if System.get_instance().platform == "suse":
+  nagios_p1_pl = "/usr/lib/nagios/p1.pl"
+  htpasswd_cmd = "htpasswd2"
+else:
+  nagios_p1_pl = "/usr/bin/p1.pl"
+  htpasswd_cmd = "htpasswd"
+  
+nagios_user = config['configurations']['global']['nagios_user']
+nagios_group = config['configurations']['global']['nagios_group']
+nagios_web_login = config['configurations']['global']['nagios_web_login']
+nagios_web_password = config['configurations']['global']['nagios_web_password']
+user_group = config['configurations']['global']['user_group']
+nagios_contact = config['configurations']['global']['nagios_contact']
+
+namenode_host = default("/clusterHostInfo/namenode_host", None)
+_snamenode_host = default("/clusterHostInfo/snamenode_host", None)
+_jtnode_host = default("/clusterHostInfo/jtnode_host", None)
+_slave_hosts = default("/clusterHostInfo/slave_hosts", None)
+_journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", None)
+_zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", None)
+_rm_host = default("/clusterHostInfo/rm_host", None)
+_nm_hosts = default("/clusterHostInfo/nm_hosts", None)
+_hs_host = default("/clusterHostInfo/hs_host", None)
+_zookeeper_hosts = default("/clusterHostInfo/zookeeper_hosts", None)
+_flume_hosts = default("/clusterHostInfo/flume_hosts", None)
+_nagios_server_host = default("/clusterHostInfo/nagios_server_host",None)
+_ganglia_server_host = default("/clusterHostInfo/ganglia_server_host",None)
+
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts",None)
+_hive_server_host = default("/clusterHostInfo/hive_server_host",None)
+_oozie_server = default("/clusterHostInfo/oozie_server",None)
+_webhcat_server_host = default("/clusterHostInfo/webhcat_server_host",None)
+# can differ on HDP2
+_mapred_tt_hosts = _slave_hosts
+# if hbase_rs_hosts is not given, region servers are assumed to run on the same nodes as the slaves
+_hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", _slave_hosts)
+_hue_server_host = default("/clusterHostInfo/hue_server_host", None)
+all_hosts = config['clusterHostInfo']['all_hosts']
+
+
+hostgroup_defs = {
+    'namenode' : namenode_host,
+    'snamenode' : _snamenode_host,
+    'slaves' : _slave_hosts,
+    # not in HDP2
+    'tasktracker-servers' : _mapred_tt_hosts,
+    'agent-servers' : all_hosts,
+    'nagios-server' : _nagios_server_host,
+    'jobtracker' : _jtnode_host,
+    'ganglia-server' : _ganglia_server_host,
+    'flume-servers' : _flume_hosts,
+    'zookeeper-servers' : _zookeeper_hosts,
+    'hbasemasters' : hbase_master_hosts,
+    'hiveserver' : _hive_server_host,
+    'region-servers' : _hbase_rs_hosts,
+    'oozie-server' : _oozie_server,
+    'webhcat-server' : _webhcat_server_host,
+    'hue-server' : _hue_server_host,
+    'resourcemanager' : _rm_host,
+    'nodemanagers' : _nm_hosts,
+    'historyserver2' : _hs_host,
+    'journalnodes' : _journalnode_hosts
+}
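
Note how dfs_ha_enabled above is derived: it is true exactly when dfs.ha.namenodes.<nameservice> lists more than one NameNode id. In isolation (the value below is an example):

    dfs_ha_namenode_ids = "nn1,nn2"     # example value from hdfs-site
    dfs_ha_enabled = bool(dfs_ha_namenode_ids) and \
        len(dfs_ha_namenode_ids.split(",")) > 1
    print(dfs_ha_enabled)               # -> True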

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py
new file mode 100644
index 0000000..33b35fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+nagios_pid_dir = "/var/run/nagios"
+nagios_pid_file = format("{nagios_pid_dir}/nagios.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2
new file mode 100644
index 0000000..9dada51
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/contacts.cfg.j2
@@ -0,0 +1,91 @@
+###############################################################################
+# CONTACTS.CFG - SAMPLE CONTACT/CONTACTGROUP DEFINITIONS
+#
+# Last Modified: 05-31-2007
+#
+# NOTES: This config file provides you with some example contact and contact
+#        group definitions that you can reference in host and service
+#        definitions.
+#       
+#        You don't need to keep these definitions in a separate file from your
+#        other object definitions.  This has been done just to make things
+#        easier to understand.
+#
+###############################################################################
+
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+
+###############################################################################
+###############################################################################
+#
+# CONTACTS
+#
+###############################################################################
+###############################################################################
+
+# Just one contact defined by default - the Nagios admin (that's you)
+# This contact definition inherits a lot of default values from the 'generic-contact' 
+# template which is defined elsewhere.
+
+define contact{
+        contact_name    {{nagios_web_login}}                                        ; Short name of user
+        use             generic-contact                                             ; Inherit default values from generic-contact template (defined above)
+        alias           Nagios Admin                                                ; Full name of user
+
+        email           {{nagios_contact}}	; <<***** CHANGE THIS TO YOUR EMAIL ADDRESS ******
+        }
+
+# Contact which writes all Nagios alerts to the system logger.
+define contact{
+        contact_name                    sys_logger         ; Short name of user
+        use                             generic-contact    ; Inherit default values from generic-contact template (defined above)
+        alias                           System Logger      ; Full name of user
+        host_notifications_enabled      1
+        service_notifications_enabled   1
+        service_notification_period     24x7
+        host_notification_period        24x7
+        service_notification_options    w,u,c,r,s
+        host_notification_options       d,u,r,s
+        can_submit_commands             1
+        retain_status_information       1
+        service_notification_commands   service_sys_logger
+        host_notification_commands      host_sys_logger
+        }
+
+###############################################################################
+###############################################################################
+#
+# CONTACT GROUPS
+#
+###############################################################################
+###############################################################################
+
+# We only have one contact in this simple configuration file, so there is
+# no need to create more than one contact group.
+
+define contactgroup {
+        contactgroup_name       admins
+        alias                   Nagios Administrators
+        members                 {{nagios_web_login}},sys_logger
+}
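
The {{nagios_web_login}} and {{nagios_contact}} placeholders in this template are filled from the global configuration when the agent renders it. Rendered stand-alone with Jinja2 and example values:

    from jinja2 import Template

    snippet = ("contact_name  {{nagios_web_login}}\n"
               "email         {{nagios_contact}}")
    print(Template(snippet).render(nagios_web_login="nagiosadmin",
                                   nagios_contact="admin@example.com"))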

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
new file mode 100644
index 0000000..e47a09e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-commands.cfg.j2
@@ -0,0 +1,114 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+{% if env.system.platform != "suse" %}
+# 'check_cpu' check remote cpu load
+define command {
+        command_name    check_cpu
+        command_line    $USER1$/check_cpu.pl -H $HOSTADDRESS$ -C hadoop -w $ARG1$ -c $ARG2$
+       }
+{% endif %}
+
+# Check data node storage full 
+define command {
+        command_name    check_datanode_storage
+        command_line    php $USER1$/check_datanode_storage.php -h $HOSTADDRESS$ -p $ARG1$ -w $ARG2$ -c $ARG3$ -e $ARG4$ -k $ARG5$ -r $ARG6$ -t $ARG7$ -s $ARG8$
+       }
+
+define command{
+        command_name    check_hdfs_blocks
+        command_line    php $USER1$/check_hdfs_blocks.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -s $ARG5$ -e $ARG6$ -k $ARG7$ -r $ARG8$ -t $ARG9$ -u $ARG10$
+       }
+
+define command{
+        command_name    check_hdfs_capacity
+        command_line    php $USER1$/check_hdfs_capacity.php -h $ARG1$ -p $ARG2$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_aggregate
+        command_line    php $USER1$/check_aggregate.php -f /var/nagios/status.dat -s 1 -t service -n $ARG1$ -w $ARG2$ -c $ARG3$
+       }
+
+define command{
+        command_name    check_rpcq_latency
+        command_line    php $USER1$/check_rpcq_latency.php -h $HOSTADDRESS$ -p $ARG2$ -n $ARG1$ -w $ARG3$ -c $ARG4$ -e $ARG5$ -k $ARG6$ -r $ARG7$ -t $ARG8$ -s $ARG9$
+       }
+
+define command{
+        command_name    check_nagios
+        command_line    $USER1$/check_nagios -e $ARG1$ -F $ARG2$ -C $ARG3$ 
+       }
+
+define command{
+        command_name    check_webui
+        command_line    $USER1$/check_webui.sh $ARG1$ $HOSTADDRESS$ $ARG2$
+       }
+
+define command{
+        command_name    check_name_dir_status
+        command_line    php $USER1$/check_name_dir_status.php -h $HOSTADDRESS$ -p $ARG1$ -e $ARG2$ -k $ARG3$ -r $ARG4$ -t $ARG5$ -s $ARG6$
+       }
+
+define command{
+        command_name    check_oozie_status
+        command_line    $USER1$/check_oozie_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_templeton_status
+        command_line    $USER1$/check_templeton_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+
+define command{
+        command_name    check_hive_metastore_status
+        command_line    $USER1$/check_hive_metastore_status.sh $HOSTADDRESS$ $ARG1$ $ARG2$ $ARG3$ $ARG4$ $ARG5$ $ARG6$ $ARG7$
+       }
+define command{
+        command_name    check_hue_status
+        command_line    $USER1$/check_hue_status.sh
+       }
+
+define command{
+       command_name    check_mapred_local_dir_used_space
+       command_line    $USER1$/check_mapred_local_dir_used.sh $ARG1$ $ARG2$
+       }
+
+define command{
+       command_name    check_namenodes_ha
+       command_line    $USER1$/check_namenodes_ha.sh $ARG1$ $ARG2$
+       }
+
+define command{
+        command_name    check_nodemanager_health
+        command_line    $USER1$/check_nodemanager_health.sh $HOSTADDRESS$ $ARG1$
+       }
+
+define command{
+        command_name    host_sys_logger
+        command_line    $USER1$/sys_logger.py $HOSTSTATETYPE$ $HOSTATTEMPT$ $HOSTSTATE$ "Host::Ping" "Event Host=$HOSTADDRESS$($HOSTSTATE$), $HOSTOUTPUT$ $LONGHOSTOUTPUT$"
+       }
+
+define command{
+        command_name    service_sys_logger
+        command_line    $USER1$/sys_logger.py $SERVICESTATETYPE$ $SERVICEATTEMPT$ $SERVICESTATE$ "$SERVICEDESC$" "Event Host=$HOSTADDRESS$ Service Description=$SERVICEDESC$($SERVICESTATE$), $SERVICEOUTPUT$ $LONGSERVICEOUTPUT$"
+       }
\ No newline at end of file
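
In these command definitions the $ARG1$..$ARGn$ macros are filled from the "!"-separated values that a service's check_command passes in (see hadoop-services.cfg.j2 below). Splitting one such string by hand shows the wiring (values are illustrative):

    check_command = "check_webui!namenode!50070"
    parts = check_command.split("!")
    command_name, args = parts[0], parts[1:]
    print(command_name)   # -> "check_webui"
    print(args)           # -> ['namenode', '50070']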

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
new file mode 100644
index 0000000..d24e5cd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hostgroups.cfg.j2
@@ -0,0 +1,33 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+{% for name, hosts in hostgroup_defs.iteritems() %}
+{% if hosts %}
+define hostgroup {
+        hostgroup_name  {{name}}
+        alias           {{name}}
+        members         {{','.join(hosts)}}
+}
+{% endif %}
+{% endfor %}
+
+define hostgroup {
+        hostgroup_name  all-servers
+        alias           All Servers
+        members         {{','.join(all_hosts)}}
+}
\ No newline at end of file
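
The loop above emits a hostgroup block only for entries of hostgroup_defs whose host list is non-empty. Rendering a trimmed-down version of the same template stand-alone (example hosts, and .items() instead of .iteritems() so the sketch is not tied to Python 2):

    from jinja2 import Template

    tmpl = Template(
        "{% for name, hosts in hostgroup_defs.items() %}{% if hosts %}"
        "define hostgroup { hostgroup_name {{name}} members {{ hosts|join(',') }} }\n"
        "{% endif %}{% endfor %}")
    print(tmpl.render(hostgroup_defs={"namenode": ["nn1.example.com"],
                                      "hue-server": None}))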

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
new file mode 100644
index 0000000..778e4f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-hosts.cfg.j2
@@ -0,0 +1,34 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+{% for host in all_hosts %}
+define host {
+        alias        {{host}}
+        host_name    {{host}}
+        use          linux-server
+        address      {{host}}
+        check_interval         0.25
+        retry_interval         0.25
+        max_check_attempts     4
+        notifications_enabled     1
+        first_notification_delay  0     # Send notification soon after change in the hard state
+        notification_interval     0     # Send the notification once
+        notification_options      d,u,r
+}
+
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
new file mode 100644
index 0000000..233051f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-servicegroups.cfg.j2
@@ -0,0 +1,98 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+{% if hostgroup_defs['namenode'] or
+  hostgroup_defs['snamenode']  or
+  hostgroup_defs['slaves'] %}
+define servicegroup {
+  servicegroup_name  HDFS
+  alias  HDFS Checks
+}
+{% endif %}
+{%if hostgroup_defs['jobtracker'] or
+  hostgroup_defs['historyserver2']-%}
+define servicegroup {
+  servicegroup_name  MAPREDUCE
+  alias  MAPREDUCE Checks
+}
+{% endif %}
+{%if hostgroup_defs['resourcemanager'] or
+  hostgroup_defs['nodemanagers'] %}
+define servicegroup {
+  servicegroup_name  YARN
+  alias  YARN Checks
+}
+{% endif %}
+{%if hostgroup_defs['flume-servers'] %}
+define servicegroup {
+  servicegroup_name  FLUME
+  alias  FLUME Checks
+}
+{% endif %}
+{%if hostgroup_defs['hbasemasters'] %}
+define servicegroup {
+  servicegroup_name  HBASE
+  alias  HBASE Checks
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+define servicegroup {
+  servicegroup_name  OOZIE
+  alias  OOZIE Checks
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+define servicegroup {
+  servicegroup_name  WEBHCAT
+  alias  WEBHCAT Checks
+}
+{% endif %}
+{% if hostgroup_defs['nagios-server'] %}
+define servicegroup {
+  servicegroup_name  NAGIOS
+  alias  NAGIOS Checks
+}
+{% endif %}
+{% if hostgroup_defs['ganglia-server'] %}
+define servicegroup {
+  servicegroup_name  GANGLIA
+  alias  GANGLIA Checks
+}
+{% endif %}
+{% if hostgroup_defs['hiveserver'] %}
+define servicegroup {
+  servicegroup_name  HIVE-METASTORE
+  alias  HIVE-METASTORE Checks
+}
+{% endif %}
+{% if hostgroup_defs['zookeeper-servers'] %}
+define servicegroup {
+  servicegroup_name  ZOOKEEPER
+  alias  ZOOKEEPER Checks
+}
+{% endif %}
+define servicegroup {
+  servicegroup_name  AMBARI
+  alias  AMBARI Checks
+}
+{% if hostgroup_defs['hue-server'] %}
+define servicegroup {
+  servicegroup_name  HUE
+  alias  HUE Checks
+}
+{% endif %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2
new file mode 100644
index 0000000..d3e5e24
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/templates/hadoop-services.cfg.j2
@@ -0,0 +1,714 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+{# TODO: Look for { or } in created file #}
+# NAGIOS SERVER Check (status log update)
+{% if hostgroup_defs['nagios-server'] %}
+define service {
+        name                            hadoop-service
+        use                             generic-service
+        notification_options            w,u,c,r,f,s
+        first_notification_delay        0
+        notification_interval           0                 # Send the notification once
+        contact_groups                  admins
+        notifications_enabled           1
+        event_handler_enabled           1
+        register                        0
+}
+
+define service {        
+        hostgroup_name          nagios-server        
+        use                     hadoop-service
+        service_description     NAGIOS::Nagios status log freshness
+        servicegroups           NAGIOS
+        check_command           check_nagios!10!/var/nagios/status.dat!{{nagios_lookup_daemon_str}}
+        normal_check_interval   5
+        retry_check_interval    0.5
+        max_check_attempts      2
+}
+
+# NAGIOS SERVER HDFS Checks
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes with space available
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode space"!10%!30%
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent DataNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"DATANODE::DataNode process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+# AMBARI AGENT Checks
+{% for hostname in all_hosts %}
+define service {
+        host_name	        {{ hostname }}
+        use                     hadoop-service
+        service_description     AMBARI::Ambari Agent process
+        servicegroups           AMBARI
+        check_command           check_tcp!{{all_ping_ports[loop.index-1]}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% endfor %}
+
+# NAGIOS SERVER ZOOKEEPER Checks
+{% if hostgroup_defs['zookeeper-servers'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     ZOOKEEPER::Percent ZooKeeper Servers live
+        servicegroups           ZOOKEEPER
+        check_command           check_aggregate!"ZOOKEEPER::ZooKeeper Server process"!35%!70%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+# NAGIOS SERVER HBASE Checks
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HBASE::Percent RegionServers live
+        servicegroups           HBASE
+        check_command           check_aggregate!"REGIONSERVER::RegionServer process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+
+
+# GANGLIA SERVER Checks
+{% if hostgroup_defs['ganglia-server'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Server process
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for NameNode
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_namenode_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+{% if hostgroup_defs['jobtracker'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for JobTracker
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_jobtracker_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HBase Master
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hbase_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['resourcemanager'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for ResourceManager
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_rm_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+define service {
+        hostgroup_name          ganglia-server
+        use                     hadoop-service
+        service_description     GANGLIA::Ganglia Monitor process for HistoryServer
+        servicegroups           GANGLIA
+        check_command           check_tcp!{{ ganglia_collector_hs_port }}!-w 1 -c 1
+        normal_check_interval   0.25
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endif %}
+
+{% endif %}
+
+{% if hostgroup_defs['snamenode'] %}
+# Secondary namenode checks
+define service {
+        hostgroup_name          snamenode
+        use                     hadoop-service
+        service_description     NAMENODE::Secondary NameNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ snamenode_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['namenode'] %}
+# HDFS Checks
+{%  for namenode_hostname in namenode_host %}
+{# TODO: check if we can get rid of str, lower #}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode edit logs directory status on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_name_dir_status!{{ namenode_port }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if env.system.platform != "suse" %}
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode host CPU utilization on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode Web UI on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_webui!namenode!{{ namenode_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     NAMENODE::NameNode process on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_tcp!{{ namenode_metadata_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+define service {
+        host_name               {{ namenode_hostname }}
+        use                     hadoop-service
+        service_description     HDFS::NameNode RPC latency on {{ namenode_hostname }}
+        servicegroups           HDFS
+        check_command           check_rpcq_latency!NameNode!{{ namenode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      5
+}
+
+{%  endfor  %}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Blocks health
+        servicegroups           HDFS
+        check_command           check_hdfs_blocks!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!0%!0%!{{ nn_metrics_property }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   2
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::HDFS capacity utilization
+        servicegroups           HDFS
+        check_command           check_hdfs_capacity!$HOSTGROUPMEMBERS:namenode$!{{ namenode_port }}!80%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   10
+        retry_check_interval    1 
+        max_check_attempts      1
+}
+
+{% endif %}
+
+# MAPREDUCE Checks
+{% if hostgroup_defs['jobtracker'] %}
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::JobTracker Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!jobtracker!{{ jtnode_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::HistoryServer Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!jobhistory!{{ jobhistory_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+{% if env.system.platform != "suse" %}
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::JobTracker CPU utilization
+        servicegroups           MAPREDUCE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     JOBTRACKER::JobTracker process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ jtnode_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+
+define service {
+        hostgroup_name          jobtracker
+        use                     hadoop-service
+        service_description     MAPREDUCE::JobTracker RPC latency
+        servicegroups           MAPREDUCE
+        check_command           check_rpcq_latency!JobTracker!{{ jtnode_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+{% if hostgroup_defs['tasktracker-servers'] %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     MAPREDUCE::Percent TaskTrackers live
+        servicegroups           MAPREDUCE
+        check_command           check_aggregate!"TASKTRACKER::TaskTracker process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+# MAPREDUCE::TASKTRACKER Checks 
+define service {
+        hostgroup_name          tasktracker-servers
+        use                     hadoop-service
+        service_description     TASKTRACKER::TaskTracker process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ tasktracker_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+# MAPREDUCE::TASKTRACKER Mapreduce local dir used space
+define service {
+        hostgroup_name          tasktracker-servers
+        use                     hadoop-service
+        service_description     TASKTRACKER::MapReduce local dir space
+        servicegroups           MAPREDUCE
+        check_command           check_mapred_local_dir_used_space!{{ mapred_local_dir }}!85%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+
+{% endif %}
+{% endif %}
+
+{% if hostgroup_defs['resourcemanager'] %}
+# YARN::RESOURCEMANAGER Checks 
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager Web UI
+        servicegroups           YARN
+        check_command           check_webui!resourcemanager!{{ rm_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.platform != "suse" %}
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager CPU utilization
+        servicegroups           YARN
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{% endif %}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager RPC latency
+        servicegroups           YARN
+        check_command           check_rpcq_latency!ResourceManager!{{ rm_port }}!3000!5000!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1 
+        max_check_attempts      5
+}
+
+define service {
+        hostgroup_name          resourcemanager
+        use                     hadoop-service
+        service_description     RESOURCEMANAGER::ResourceManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ rm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['nodemanagers'] %}
+# YARN::NODEMANAGER Checks
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager process
+        servicegroups           YARN
+        check_command           check_tcp!{{ nm_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          nodemanagers
+        use                     hadoop-service
+        service_description     NODEMANAGER::NodeManager health
+        servicegroups           YARN
+        check_command           check_nodemanager_health!{{ nm_port }}!{{ str(security_enabled).lower() }}!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     NODEMANAGER::Percent NodeManagers live
+        servicegroups           YARN
+        check_command           check_aggregate!"NODEMANAGER::NodeManager process"!10%!30%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{%  endif %}
+
+{% if hostgroup_defs['historyserver2'] %}
+# MAPREDUCE::JOBHISTORY Checks
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer Web UI
+        servicegroups           MAPREDUCE
+        check_command           check_webui!historyserver2!{{ hs_port }}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+
+{% if env.system.platform != "suse" %}
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer CPU utilization
+        servicegroups           MAPREDUCE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+
+define service {
+        hostgroup_name          historyserver2
+        use                     hadoop-service
+        service_description     JOBHISTORY::HistoryServer process
+        servicegroups           MAPREDUCE
+        check_command           check_tcp!{{ hs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{%  endif %}
+
+{% if hostgroup_defs['journalnodes'] %}
+# Journalnode checks
+define service {
+        hostgroup_name          journalnodes
+        use                     hadoop-service
+        service_description     JOURNALNODE::JournalNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{ journalnode_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{% if dfs_ha_enabled %}
+define service {
+        hostgroup_name          nagios-server
+        use                     hadoop-service
+        service_description     HDFS::Percent JournalNodes live
+        servicegroups           HDFS
+        check_command           check_aggregate!"JOURNALNODE::JournalNode process"!33%!50%
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      3
+}
+{% endif %}
+{% endif %}
+
+{% if hostgroup_defs['slaves'] %}
+# HDFS::DATANODE Checks
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode process
+        servicegroups           HDFS
+        check_command           check_tcp!{{datanode_port}}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+define service {
+        hostgroup_name          slaves
+        use                     hadoop-service
+        service_description     DATANODE::DataNode space
+        servicegroups           HDFS
+        check_command           check_datanode_storage!{{ datanode_port }}!90%!90%!{{ str(hadoop_ssl_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}!{{ str(security_enabled).lower() }}
+        normal_check_interval   5
+        retry_check_interval    1
+        max_check_attempts      2
+}
+
+{% endif %}
+
+{% if hostgroup_defs['flume-servers'] %}
+# FLUME Checks
+define service {
+        hostgroup_name          flume-servers
+        use                     hadoop-service
+        service_description     FLUME::Flume Agent process
+        servicegroups           FLUME
+        check_command           check_tcp!{{ flume_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+
+{% if hostgroup_defs['zookeeper-servers'] %}
+# ZOOKEEPER Checks
+define service {
+        hostgroup_name          zookeeper-servers
+        use                     hadoop-service
+        service_description     ZOOKEEPER::ZooKeeper Server process
+        servicegroups           ZOOKEEPER
+        check_command           check_tcp!{{ clientPort }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hbasemasters'] %}
+# HBASE::REGIONSERVER Checks
+define service {
+        hostgroup_name          region-servers
+        use                     hadoop-service
+        service_description     REGIONSERVER::RegionServer process
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_rs_port }}!-w 1 -c 1
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+
+{# HBASE:: MASTER Checks
+# define service {
+#         hostgroup_name          hbasemasters
+#         use                     hadoop-service
+#         service_description     HBASEMASTER::HBase Master Web UI
+#         servicegroups           HBASE
+#         check_command           check_webui!hbase!{{ hbase_master_port }}
+#         normal_check_interval   1
+#         retry_check_interval    1
+#         max_check_attempts      3
+# #}
+{%  for hbasemaster in hbase_master_hosts  %}
+{% if env.system.platform != "suse" %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master CPU utilization on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_cpu!200%!250%
+        normal_check_interval   5
+        retry_check_interval    2 
+        max_check_attempts      5
+}
+{%  endif %}
+define service {
+        host_name               {{ hbasemaster }}
+        use                     hadoop-service
+        service_description     HBASEMASTER::HBase Master process on {{ hbasemaster }}
+        servicegroups           HBASE
+        check_command           check_tcp!{{ hbase_master_rpc_port }}!-w 1 -c 1
+        normal_check_interval   0.5
+        retry_check_interval    0.25
+        max_check_attempts      4
+}
+{% endfor %}
+{% endif %}
+
+{% if hostgroup_defs['hiveserver'] %}
+# HIVE Metastore check
+define service {
+        hostgroup_name          hiveserver
+        use                     hadoop-service
+        service_description     HIVE-METASTORE::Hive Metastore status
+        servicegroups           HIVE-METASTORE
+        {% if security_enabled %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_hive_metastore_status!{{ hive_metastore_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   0.5
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['oozie-server'] %}
+# Oozie check
+define service {
+        hostgroup_name          oozie-server
+        use                     hadoop-service
+        service_description     OOZIE::Oozie Server status
+        servicegroups           OOZIE
+        {% if security_enabled %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!true!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_oozie_status!{{ oozie_server_port }}!{{ java64_home }}!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    1
+        max_check_attempts      3
+}
+{% endif %}
+{% if hostgroup_defs['webhcat-server'] %}
+# WEBHCAT check
+define service {
+        hostgroup_name          webhcat-server
+        use                     hadoop-service
+        service_description     WEBHCAT::WebHCat Server status
+        servicegroups           WEBHCAT 
+        {% if security_enabled %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!{{ str(security_enabled).lower() }}!{{ nagios_keytab_path }}!{{ nagios_principal_name }}!{{ kinit_path_local }}
+        {% else %}
+        check_command           check_templeton_status!{{ templeton_port }}!v1!false
+        {% endif %}
+        normal_check_interval   1
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
+{% if hostgroup_defs['hue-server'] %}
+define service {
+        hostgroup_name          hue-server
+        use                     hadoop-service
+        service_description     HUE::Hue Server status
+        servicegroups           HUE
+        check_command           check_hue_status
+        normal_check_interval   100
+        retry_check_interval    0.5
+        max_check_attempts      3
+}
+{% endif %}
+
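
The service definitions above are Jinja2 templates: Ambari substitutes the {{ ... }} placeholders with cluster-specific values before the rendered file is handed to Nagios. As a rough sketch of that rendering step (the port value below is an assumed example, not one taken from this stack), a single check_tcp block can be expanded like this:

from jinja2 import Template

# Sketch only: render one Nagios service block the way the template engine
# would; 8651 is an assumed example value for ganglia_port.
block = Template("""
define service {
        hostgroup_name          ganglia-server
        use                     hadoop-service
        service_description     GANGLIA::Ganglia Server process
        servicegroups           GANGLIA
        check_command           check_tcp!{{ ganglia_port }}!-w 1 -c 1
        normal_check_interval   0.25
        retry_check_interval    0.25
        max_check_attempts      4
}
""")
print(block.render(ganglia_port=8651))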


[33/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py
new file mode 100644
index 0000000..cfb3e08
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/mysql_service.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+
+def mysql_service(daemon_name=None, action='start'):
+  cmd = format('service {daemon_name} {action}')
+
+  if action == 'status':
+    logoutput = False
+  else:
+    logoutput = True
+
+  Execute(cmd,
+          path="/usr/local/bin/:/bin/:/sbin/",
+          tries=1,
+          logoutput=logoutput)
+
+
+
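
A minimal usage sketch for the helper above: it only builds "service <daemon> <action>" and runs it through the Execute resource, suppressing command output for status calls. The daemon name below is an assumed example; the real value is supplied by the calling script.

# Sketch, assuming this runs inside an Ambari agent command where the
# resource_management environment is already set up.
from mysql_service import mysql_service

mysql_service(daemon_name='mysqld', action='start')   # runs: service mysqld start
mysql_service(daemon_name='mysqld', action='status')  # status output is not logged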

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py
new file mode 100644
index 0000000..0cf89be
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/params.py
@@ -0,0 +1,123 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+hive_metastore_user_name = config['configurations']['hive-site']['javax.jdo.option.ConnectionUserName']
+hive_server_conf_dir = "/etc/hive/conf.server"
+hive_jdbc_connection_url = config['configurations']['hive-site']['javax.jdo.option.ConnectionURL']
+
+hive_metastore_user_passwd = config['configurations']['hive-site']['javax.jdo.option.ConnectionPassword']
+
+#users
+hive_user = config['configurations']['global']['hive_user']
+hive_lib = '/usr/lib/hive/lib/'
+#JDBC driver jar name
+hive_jdbc_driver = default('hive_jdbc_driver', 'com.mysql.jdbc.Driver')
+if hive_jdbc_driver == "com.mysql.jdbc.Driver":
+  jdbc_jar_name = "mysql-connector-java.jar"
+elif hive_jdbc_driver == "oracle.jdbc.driver.OracleDriver":
+  jdbc_jar_name = "ojdbc6.jar"
+
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+
+#common
+hive_metastore_port = config['configurations']['global']['hive_metastore_port']
+hive_var_lib = '/var/lib/hive'
+hive_server_host = config['clusterHostInfo']['hive_server_host']
+hive_url = format("jdbc:hive2://{hive_server_host}:10000")
+
+smokeuser = config['configurations']['global']['smokeuser']
+smoke_test_sql = "/tmp/hiveserver2.sql"
+smoke_test_path = "/tmp/hiveserver2Smoke.sh"
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+hive_metastore_keytab_path =  config['configurations']['hive-site']['hive.metastore.kerberos.keytab.file']
+
+#hive_env
+hive_conf_dir = "/etc/hive/conf"
+hive_dbroot = config['configurations']['global']['hive_dbroot']
+hive_log_dir = config['configurations']['global']['hive_log_dir']
+hive_pid_dir = status_params.hive_pid_dir
+hive_pid = status_params.hive_pid
+
+#hive-site
+hive_database_name = config['configurations']['global']['hive_database_name']
+
+#Starting hiveserver2
+start_hiveserver2_script = 'startHiveserver2.sh'
+
+hadoop_home = '/usr'
+
+##Starting metastore
+start_metastore_script = 'startMetastore.sh'
+hive_metastore_pid = status_params.hive_metastore_pid
+java_share_dir = '/usr/share/java'
+driver_curl_target = format("{java_share_dir}/{jdbc_jar_name}")
+
+hdfs_user =  config['configurations']['global']['hdfs_user']
+user_group = config['configurations']['global']['user_group']
+artifact_dir = "/tmp/HDP-artifacts/"
+
+target = format("{hive_lib}/{jdbc_jar_name}")
+
+jdk_location = config['hostLevelParams']['jdk_location']
+driver_curl_source = format("{jdk_location}/{jdbc_jar_name}")
+
+start_hiveserver2_path = "/tmp/start_hiveserver2_script"
+start_metastore_path = "/tmp/start_metastore_script"
+
+hive_aux_jars_path = config['configurations']['global']['hive_aux_jars_path']
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+java64_home = config['hostLevelParams']['java_home']
+
+##### MYSQL
+
+db_name = config['configurations']['global']['hive_database_name']
+mysql_user = "mysql"
+mysql_group = 'mysql'
+mysql_host = config['clusterHostInfo']['hive_mysql_host']
+
+mysql_adduser_path = "/tmp/addMysqlUser.sh"
+
+########## HCAT
+
+hcat_conf_dir = '/etc/hcatalog/conf'
+
+metastore_port = 9933
+hcat_lib = '/usr/lib/hcatalog/share/hcatalog'
+
+hcat_dbroot = hcat_lib
+
+hcat_user = config['configurations']['global']['hcat_user']
+webhcat_user = config['configurations']['global']['webhcat_user']
+
+hcat_pid_dir = status_params.hcat_pid_dir
+hcat_log_dir = config['configurations']['global']['hcat_log_dir']   #hcat_log_dir
+
+hadoop_conf_dir = '/etc/hadoop/conf'
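
Most of these values are composed with resource_management's format(), which interpolates names visible in the caller's scope. A conceptual stand-in (plain str.format over locals(), not the actual Ambari helper) shows how driver_curl_target is built from the assignments above:

# Conceptual sketch of the interpolation only.
java_share_dir = '/usr/share/java'
jdbc_jar_name = 'mysql-connector-java.jar'   # chosen when the driver is com.mysql.jdbc.Driver
driver_curl_target = "{java_share_dir}/{jdbc_jar_name}".format(**locals())
print(driver_curl_target)   # /usr/share/java/mysql-connector-java.jar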

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py
new file mode 100644
index 0000000..111e8a1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/service_check.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+from hcat_service_check import hcat_service_check
+
+class HiveServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+      hive_principal_ext = format("principal={hive_metastore_keytab_path}")
+      hive_url_ext = format("{hive_url}/\\;{hive_principal_ext}")
+      smoke_cmd = format("{kinit_cmd} env JAVA_HOME={java64_home} {smoke_test_path} {hive_url_ext} {smoke_test_sql}")
+    else:
+      smoke_cmd = format("env JAVA_HOME={java64_home} {smoke_test_path} {hive_url} {smoke_test_sql}")
+
+    File(params.smoke_test_path,
+         content=StaticFile('hiveserver2Smoke.sh'),
+         mode=0755
+    )
+
+    File(params.smoke_test_sql,
+         content=StaticFile('hiveserver2.sql')
+    )
+
+    Execute(smoke_cmd,
+            tries=3,
+            try_sleep=5,
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+            logoutput=True,
+            user=params.smokeuser)
+
+    hcat_service_check()
+
+if __name__ == "__main__":
+  HiveServiceCheck().execute()
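
With security disabled, the smoke test boils down to running the staged shell script against the HiveServer2 JDBC URL. A sketch of the resulting command string, using assumed example values for the host and Java home:

# Sketch of the non-secure smoke_cmd assembled in service_check above;
# java64_home and the hive host are assumed example values.
java64_home = '/usr/jdk64/jdk1.7.0_45'
smoke_test_path = '/tmp/hiveserver2Smoke.sh'
smoke_test_sql = '/tmp/hiveserver2.sql'
hive_url = 'jdbc:hive2://hive-host.example.com:10000'
smoke_cmd = "env JAVA_HOME={0} {1} {2} {3}".format(
    java64_home, smoke_test_path, hive_url, smoke_test_sql)
print(smoke_cmd)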

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py
new file mode 100644
index 0000000..7770975
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/scripts/status_params.py
@@ -0,0 +1,30 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+hive_pid_dir = config['configurations']['global']['hive_pid_dir']
+hive_pid = 'hive-server.pid'
+
+hive_metastore_pid = 'hive.pid'
+
+hcat_pid_dir = config['configurations']['global']['hcat_pid_dir'] #hcat_pid_dir

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2
new file mode 100644
index 0000000..2a35240
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hcat-env.sh.j2
@@ -0,0 +1,25 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HCAT_PID_DIR={{hcat_pid_dir}}/
+HCAT_LOG_DIR={{hcat_log_dir}}/
+HCAT_CONF_DIR={{hcat_conf_dir}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+#DBROOT is the path where the connector jars are downloaded
+DBROOT={{hcat_dbroot}}
+USER={{hcat_user}}
+METASTORE_PORT={{metastore_port}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2
new file mode 100644
index 0000000..548262a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/HIVE/package/templates/hive-env.sh.j2
@@ -0,0 +1,55 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements. See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership. The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Set Hive and Hadoop environment variables here. These variables can be used
+# to control the execution of Hive. It should be used by admins to configure
+# the Hive installation (so that users do not have to set environment variables
+# or set command line parameters to get correct behavior).
+#
+# The hive service being invoked (CLI/HWI etc.) is available via the environment
+# variable SERVICE
+
+# Hive Client memory usage can be an issue if a large number of clients
+# are running at the same time. The flags below have been useful in
+# reducing memory usage:
+#
+ if [ "$SERVICE" = "cli" ]; then
+   if [ -z "$DEBUG" ]; then
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:+UseParNewGC -XX:-UseGCOverheadLimit"
+   else
+     export HADOOP_OPTS="$HADOOP_OPTS -XX:NewRatio=12 -Xms10m -XX:MaxHeapFreeRatio=40 -XX:MinHeapFreeRatio=15 -XX:-UseGCOverheadLimit"
+   fi
+ fi
+
+# The heap size of the JVM started by the hive shell script can be controlled via:
+
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+
+# Larger heap size may be required when running queries over large number of files or partitions.
+# By default hive shell scripts use a heap size of 256 (MB).  Larger heap size would also be
+# appropriate for hive server (hwi etc).
+
+
+# Set HADOOP_HOME to point to a specific hadoop install directory
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}
+
+# Hive Configuration Directory can be controlled by:
+export HIVE_CONF_DIR={{conf_dir}}
+
+# Folder containing extra libraries required for hive compilation/execution can be controlled by:
+# export HIVE_AUX_JARS_PATH=
+export HIVE_AUX_JARS_PATH={{hive_aux_jars_path}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
new file mode 100644
index 0000000..79c644d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/client.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Client(Script):
+
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  Client().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
new file mode 100644
index 0000000..8eb2089
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/historyserver.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Historyserver(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+  
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    service('historyserver',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('historyserver',
+            action='stop'
+    )
+
+  def status(self, env):
+     import status_params
+     env.set_params(status_params)
+     check_process_status(status_params.historyserver_pid_file)
+
+if __name__ == "__main__":
+  Historyserver().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
new file mode 100644
index 0000000..8f7f1d7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/jobtracker.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Jobtracker(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('jobtracker',
+            action='start'
+    )
+    
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('jobtracker',
+            action='stop'
+    )
+    
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.jobtracker_pid_file)
+    pass
+
+  def decommission(self, env):
+    import params
+
+    env.set_params(params)
+
+    mapred_user = params.mapred_user
+    conf_dir = params.conf_dir
+    user_group = params.user_group
+
+    File(params.exclude_file_path,
+         content=Template("exclude_hosts_list.j2"),
+         owner=mapred_user,
+         group=user_group
+    )
+
+    ExecuteHadoop('mradmin -refreshNodes',
+                user=mapred_user,
+                conf_dir=conf_dir,
+                kinit_override=True)
+    pass
+
+if __name__ == "__main__":
+  Jobtracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
new file mode 100644
index 0000000..c5fd002
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/mapreduce.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+import sys
+
+
+def mapreduce():
+  import params
+
+  Directory([params.mapred_pid_dir,params.mapred_log_dir],
+            owner=params.mapred_user,
+            group=params.user_group,
+            recursive=True
+  )
+
+  Directory(params.mapred_local_dir,
+            owner=params.mapred_user,
+            mode=0755,
+            recursive=True
+  )
+
+  File(params.exclude_file_path,
+            owner=params.mapred_user,
+            group=params.user_group,
+  )
+
+  File(params.mapred_hosts_file_path,
+            owner=params.mapred_user,
+            group=params.user_group,
+  )
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
new file mode 100644
index 0000000..7be5a7c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/params.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+conf_dir = "/etc/hadoop/conf"
+
+mapred_user = status_params.mapred_user
+pid_dir_prefix = status_params.pid_dir_prefix
+mapred_pid_dir = status_params.mapred_pid_dir
+
+historyserver_pid_file = status_params.historyserver_pid_file
+jobtracker_pid_file = status_params.jobtracker_pid_file
+tasktracker_pid_file = status_params.tasktracker_pid_file
+
+hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
+hadoop_bin = "/usr/lib/hadoop/bin"
+user_group = config['configurations']['global']['user_group']
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+mapred_log_dir = format("{hdfs_log_dir_prefix}/{mapred_user}")
+mapred_local_dir = config['configurations']['mapred-site']['mapred.local.dir']
+
+hadoop_jar_location = "/usr/lib/hadoop/"
+smokeuser = config['configurations']['global']['smokeuser']
+security_enabled = config['configurations']['global']['security_enabled']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+
+#exclude file
+mr_exclude_hosts = default("/clusterHostInfo/decom_tt_hosts", [])
+exclude_file_path = config['configurations']['mapred-site']['mapred.hosts.exclude']
+mapred_hosts_file_path = config['configurations']['mapred-site']['mapred.hosts']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
new file mode 100644
index 0000000..f4aa91b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+
+def service(
+    name,
+    action='start'):
+
+  import params
+
+  pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-{name}.pid")
+  hadoop_daemon = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {hadoop_bin}/hadoop-daemon.sh")
+  cmd = format("{hadoop_daemon} --config {conf_dir}")
+
+  if action == 'start':
+    daemon_cmd = format("{cmd} start {name}")
+    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+    Execute(daemon_cmd,
+            user=params.mapred_user,
+            not_if=no_op
+    )
+
+    Execute(no_op,
+            user=params.mapred_user,
+            not_if=no_op,
+            initial_wait=5
+    )
+  elif action == 'stop':
+    daemon_cmd = format("{cmd} stop {name}")
+    rm_pid =  format("rm -f {pid_file}")
+
+    Execute(daemon_cmd,
+            user=params.mapred_user
+    )
+    Execute(rm_pid)
\ No newline at end of file
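
The start path builds a hadoop-daemon.sh command line and guards it with a pid-file check so an already-running daemon is left alone. A sketch of the strings produced for service('jobtracker', action='start'), using assumed example values for the pid directory prefix:

# Sketch only: the equivalents of pid_file, daemon_cmd and no_op above.
mapred_user = 'mapred'
mapred_pid_dir = '/var/run/hadoop/mapred'       # assumed pid_dir_prefix/mapred_user
hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
hadoop_bin = '/usr/lib/hadoop/bin'
conf_dir = '/etc/hadoop/conf'
name = 'jobtracker'

pid_file = "{0}/hadoop-{1}-{2}.pid".format(mapred_pid_dir, mapred_user, name)
daemon_cmd = ("export HADOOP_LIBEXEC_DIR={0} && {1}/hadoop-daemon.sh "
              "--config {2} start {3}").format(hadoop_libexec_dir, hadoop_bin, conf_dir, name)
no_op = "ls {0} >/dev/null 2>&1 && ps `cat {0}` >/dev/null 2>&1".format(pid_file)
print(daemon_cmd)
print(no_op)   # used as not_if, so a live daemon is not started twice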

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
new file mode 100644
index 0000000..c0a4a59
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/service_check.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    jar_location = params.hadoop_jar_location
+    input_file = 'mapredsmokeinput'
+    output_file = "mapredsmokeoutput"
+
+    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
+    create_file_cmd = format("{cleanup_cmd} ; hadoop dfs -put /etc/passwd {input_file}")
+    test_cmd = format("fs -test -e {output_file}")
+    run_wordcount_job = format("jar {jar_location}/hadoop-examples.jar wordcount {input_file} {output_file}")
+
+    if params.security_enabled:
+      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
+
+      Execute(kinit_cmd,
+              user=params.smokeuser
+      )
+
+    ExecuteHadoop(create_file_cmd,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.conf_dir
+    )
+
+    ExecuteHadoop(run_wordcount_job,
+                  tries=1,
+                  try_sleep=5,
+                  user=params.smokeuser,
+                  conf_dir=params.conf_dir,
+                  logoutput=True
+    )
+
+    ExecuteHadoop(test_cmd,
+                  user=params.smokeuser,
+                  conf_dir=params.conf_dir
+    )
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
\ No newline at end of file
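
The check stages /etc/passwd into HDFS, runs the bundled wordcount example and then tests for the output directory. Roughly the hadoop invocations issued (ExecuteHadoop wraps each command with the hadoop binary and the configured conf dir; the paths below are the defaults from params.py):

# Rough sketch of the commands run by the service check above.
conf_dir = '/etc/hadoop/conf'
jar_location = '/usr/lib/hadoop/'
input_file, output_file = 'mapredsmokeinput', 'mapredsmokeoutput'

cmds = [
    "hadoop --config {0} dfs -rmr {1} {2} ; hadoop dfs -put /etc/passwd {2}".format(
        conf_dir, output_file, input_file),
    "hadoop --config {0} jar {1}/hadoop-examples.jar wordcount {2} {3}".format(
        conf_dir, jar_location, input_file, output_file),
    "hadoop --config {0} fs -test -e {1}".format(conf_dir, output_file),
]
for c in cmds:
    print(c)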

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
new file mode 100644
index 0000000..f964a76
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/status_params.py
@@ -0,0 +1,33 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+mapred_user = config['configurations']['global']['mapred_user']
+pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+mapred_pid_dir = format("{pid_dir_prefix}/{mapred_user}")
+
+jobtracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-jobtracker.pid")
+tasktracker_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-tasktracker.pid")
+historyserver_pid_file = format("{mapred_pid_dir}/hadoop-{mapred_user}-historyserver.pid")
\ No newline at end of file
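
With the common defaults (an assumption here) of mapred_user=mapred and
hadoop_pid_dir_prefix=/var/run/hadoop, these resolve to:

  mapred_pid_dir=/var/run/hadoop/mapred
  jobtracker_pid_file=/var/run/hadoop/mapred/hadoop-mapred-jobtracker.pid
  tasktracker_pid_file=/var/run/hadoop/mapred/hadoop-mapred-tasktracker.pid
  historyserver_pid_file=/var/run/hadoop/mapred/hadoop-mapred-historyserver.pid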

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
new file mode 100644
index 0000000..77d974b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/scripts/tasktracker.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+
+from mapreduce import mapreduce
+from service import service
+
+class Tasktracker(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    mapreduce()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env) # FOR SECURITY
+    service('tasktracker',
+            action='start'
+    )
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service('tasktracker',
+            action='stop'
+    )
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.tasktracker_pid_file)
+
+if __name__ == "__main__":
+  Tasktracker().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..02fc5fe
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/MAPREDUCE/package/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in mr_exclude_hosts %}
+{{host}}
+{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
new file mode 100644
index 0000000..f4063fb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_aggregate.php
@@ -0,0 +1,243 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+  $options = getopt ("f:s:n:w:c:t:");
+  if (!array_key_exists('t', $options) || !array_key_exists('f', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+  $status_file=$options['f'];
+  $status_code=$options['s'];
+  $type=$options['t'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  if ($type == "service" && !array_key_exists('n', $options)) {
+    echo "Service description not provided -n option\n";
+    exit(3);
+  }
+  if ($type == "service") {
+    $service_name=$options['n'];
+    /* echo "DESC: " . $service_name . "\n"; */
+  }
+
+  $result = array();
+  $status_file_content = file_get_contents($status_file);
+
+  $counts;
+  if ($type == "service") {
+    $counts=query_alert_count($status_file_content, $service_name, $status_code);
+  } else {
+    $counts=query_host_count($status_file_content, $status_code);
+  }
+
+  if ($counts['total'] == 0) {
+    $percent = 0;
+  } else {
+    $percent = ($counts['actual']/$counts['total'])*100;
+  }
+  if ($percent >= $crit) {
+    echo "CRITICAL: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+    exit (1);
+  }
+  echo "OK: total:<" . $counts['total'] . ">, affected:<" . $counts['actual'] . ">\n";
+  exit(0);
+
+
+  # Functions
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -f <status_file_path> -t type(host/service) -s <status_codes> -n <service description> -w <warn%> -c <crit%>\n";
+  }
+
+  /* Query host count */
+  function query_host_count ($status_file_content, $status_code) {
+    $num_matches = preg_match_all("/hoststatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $hostcounts_object = array ();
+    $total_hosts = 0;
+    $hosts = 0;
+    foreach ($matches[0] as $object) {
+      $total_hosts++;
+      if (getParameter($object, "current_state") == $status_code) {
+        $hosts++;
+      }
+    }
+    $hostcounts_object['total'] = $total_hosts;
+    $hostcounts_object['actual'] = $hosts;
+    return $hostcounts_object;
+  }
+
+  /* Query Alert counts */
+  function query_alert_count ($status_file_content, $service_name, $status_code) {
+    $num_matches = preg_match_all("/servicestatus \{([\S\s]*?)\}/", $status_file_content, $matches, PREG_PATTERN_ORDER);
+    $alertcounts_objects = array ();
+    $total_alerts=0;
+    $alerts=0;
+    foreach ($matches[0] as $object) {
+      if (getParameter($object, "service_description") == $service_name) {
+        $total_alerts++;
+        if (getParameter($object, "current_state") >= $status_code) {
+          $alerts++;
+        }
+      }
+    }
+    $alertcounts_objects['total'] = $total_alerts;
+    $alertcounts_objects['actual'] = $alerts;
+    return $alertcounts_objects;
+  }
+
+  function get_service_type($service_description)
+  {
+    $pieces = explode("::", $service_description);
+    switch ($pieces[0]) {
+      case "NAMENODE":
+        $pieces[0] = "HDFS";
+        break;
+      case "JOBTRACKER":
+        $pieces[0] = "MAPREDUCE";
+        break;
+      case "HBASEMASTER":
+        $pieces[0] = "HBASE";
+        break;
+      case "SYSTEM":
+      case "HDFS":
+      case "MAPREDUCE":
+      case "HBASE":
+        break;
+      default:
+        $pieces[0] = "UNKNOWN";
+    }
+    return $pieces[0];
+  }
+
+  function getParameter($object, $key)
+  {
+    $pattern="/\s" . $key . "[\s= ]*([\S, ]*)\n/";
+    $num_mat = preg_match($pattern, $object, $matches);
+    $value = "";
+    if ($num_mat) {
+      $value = $matches[1];
+    }
+    return $value;
+  }
+
+function indent($json) {
+
+    $result      = '';
+    $pos         = 0;
+    $strLen      = strlen($json);
+    $indentStr   = '  ';
+    $newLine     = "\n";
+    $prevChar    = '';
+    $outOfQuotes = true;
+
+    for ($i=0; $i<=$strLen; $i++) {
+
+        // Grab the next character in the string.
+        $char = substr($json, $i, 1);
+
+        // Are we inside a quoted string?
+        if ($char == '"' && $prevChar != '\\') {
+            $outOfQuotes = !$outOfQuotes;
+
+        // If this character is the end of an element,
+        // output a new line and indent the next line.
+        } else if(($char == '}' || $char == ']') && $outOfQuotes) {
+            $result .= $newLine;
+            $pos --;
+            for ($j=0; $j<$pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        // Add the character to the result string.
+        $result .= $char;
+
+        // If the last character was the beginning of an element,
+        // output a new line and indent the next line.
+        if (($char == ',' || $char == '{' || $char == '[') && $outOfQuotes) {
+            $result .= $newLine;
+            if ($char == '{' || $char == '[') {
+                $pos ++;
+            }
+
+            for ($j = 0; $j < $pos; $j++) {
+                $result .= $indentStr;
+            }
+        }
+
+        $prevChar = $char;
+    }
+
+    return $result;
+}
+
+/* JSON document format */
+/*
+{
+  "programstatus":{
+    "last_command_check":"1327385743"
+  },
+  "hostcounts":{
+    "up_nodes":"",
+    "down_nodes":""
+  },
+  "hoststatus":[
+    {
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_state":"0",
+      "last_hard_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_check":"1327385564",
+      "current_attempt":"1",
+      "last_hard_state_change":"1327362079",
+      "last_time_up":"1327385574",
+      "last_time_down":"0",
+      "last_time_unreachable":"0",
+      "is_flapping":"0",
+      "last_check":"1327385574",
+      "servicestatus":[
+      ]
+    }
+  ],
+  "servicestatus":[
+    {
+      "service_type":"HDFS",  {HBASE, MAPREDUCE, HIVE, ZOOKEEPER}
+      "service_description":"HDFS Current Load",
+      "host_name"="ip-10-242-191-48.ec2.internal",
+      "current_attempt":"1",
+      "current_state":"0",
+      "plugin_output":"PING OK - Packet loss = 0%, RTA = 0.04 ms",
+      "last_hard_state_change":"1327362079",
+      "last_time_ok":"1327385479",
+      "last_time_warning":"0",
+      "last_time_unknown":"0",
+      "last_time_critical":"0",
+      "last_check":"1327385574",
+      "is_flapping":"0"
+    }
+  ]
+}
+*/
+
+?>
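
An illustrative invocation, checking what fraction of a given service's Nagios alerts
are in WARNING (1) or worse (the status file path and service description below are
assumptions, not taken from the stack definition):

  ./check_aggregate.php -f /var/nagios/status.dat -t service -n "DATANODE::DataNode process down" -s 1 -w 10% -c 30%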

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
new file mode 100644
index 0000000..a5680f7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_cpu.pl
@@ -0,0 +1,114 @@
+#!/usr/bin/perl -w 
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+use strict;
+use Net::SNMP;
+use Getopt::Long;
+
+# Variable
+my $base_proc = "1.3.6.1.2.1.25.3.3.1";   
+my $proc_load = "1.3.6.1.2.1.25.3.3.1.2"; 
+my $o_host = 	undef;
+my $o_community = undef;
+my $o_warn=	undef;
+my $o_crit=	undef;
+my $o_timeout = 15;
+my $o_port = 161;
+
+sub Usage {
+    print "Usage: $0 -H <host> -C <snmp_community> -w <warn level> -c <crit level>\n";
+}
+
+Getopt::Long::Configure ("bundling");
+GetOptions(
+  'H:s'   => \$o_host,	
+  'C:s'   => \$o_community,	
+  'c:s'   => \$o_crit,        
+  'w:s'   => \$o_warn
+          );
+if (!defined $o_host || !defined $o_community || !defined $o_crit || !defined $o_warn) {
+  Usage();
+  exit 3;
+}
+$o_warn =~ s/\%//g; 
+$o_crit =~ s/\%//g;
+alarm ($o_timeout);
+$SIG{'ALRM'} = sub {
+ print "Unable to contact host: $o_host\n";
+ exit 3;
+};
+
+# Connect to host
+my ($session,$error);
+($session, $error) = Net::SNMP->session(
+		-hostname  => $o_host,
+		-community => $o_community,
+		-port      => $o_port,
+		-timeout   => $o_timeout
+	  );
+if (!defined($session)) {
+   printf("Error opening session: %s.\n", $error);
+   exit 3;
+}
+
+my $exit_val=undef;
+my $resultat =  (Net::SNMP->VERSION < 4) ?
+	  $session->get_table($base_proc)
+	: $session->get_table(Baseoid => $base_proc);
+
+if (!defined($resultat)) {
+   printf("ERROR: Description table : %s.\n", $session->error);
+   $session->close;
+   exit 3;
+}
+
+$session->close;
+
+my ($cpu_used,$ncpu)=(0,0);
+foreach my $key ( keys %$resultat) {
+  if ($key =~ /$proc_load/) {
+    $cpu_used += $$resultat{$key};
+    $ncpu++;
+  }
+}
+
+if ($ncpu==0) {
+  print "Can't find CPU usage information : UNKNOWN\n";
+  exit 3;
+}
+
+$cpu_used /= $ncpu;
+
+print "$ncpu CPU, ", $ncpu==1 ? "load" : "average load";
+printf(" %.1f%%",$cpu_used);
+$exit_val=0;
+
+if ($cpu_used > $o_crit) {
+ print " > $o_crit% : CRITICAL\n";
+ $exit_val=2;
+} else {
+  if ($cpu_used > $o_warn) {
+   print " > $o_warn% : WARNING\n";
+   $exit_val=1;
+  }
+}
+print " < $o_warn% : OK\n" if ($exit_val eq 0);
+exit $exit_val;
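
A sample invocation against a host running snmpd (hostname and community string are
placeholders):

  ./check_cpu.pl -H slave1.example.com -C public -w 90% -c 95%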

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
new file mode 100644
index 0000000..dee22b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_datanode_storage.php
@@ -0,0 +1,100 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the DataNode, gets the jmx-json document,
+ * and checks the storage capacity remaining on the local datanode storage
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options) 
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=DataNode,name=FSDatasetState-*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  $cap_remain = $object['Remaining']; /* Total capacity - any external files created in data directories by non-hadoop app */
+  $cap_total = $object['Capacity']; /* Capacity used by all data partitions minus space reserved for M/R */
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  }  
+  $percent_full = ($cap_total - $cap_remain)/$cap_total * 100;
+
+  $out_msg = "Capacity:[" . $cap_total . 
+             "], Remaining Capacity:[" . $cap_remain . 
+             "], percent_full:[" . $percent_full  . "]";
+  
+  if ($percent_full > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent_full > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>
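
Example usage against a DataNode's HTTP/JMX port (all values below are illustrative;
with security disabled the keytab/principal/kinit arguments are effectively unused):

  ./check_datanode_storage.php -h dn1.example.com -p 50075 -w 80% -c 90% -e false -s false -k /dev/null -r nagios -t /usr/bin/kinit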

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
new file mode 100644
index 0000000..19347b4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_blocks.php
@@ -0,0 +1,115 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the NameNode, gets the jmx-json document,
+ * and checks whether the corrupt or missing blocks % is > threshold
+ * check_jmx -H hostaddress -p port -w 1% -c 1%
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:s:e:k:r:t:u:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('s', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $nn_jmx_property=$options['s'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['u'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=".$nn_jmx_property,
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $m_percent = 0;
+    $c_percent = 0;
+    $object = $json_array['beans'][0];
+    $missing_blocks = $object['MissingBlocks'];
+    $corrupt_blocks = $object['CorruptBlocks'];
+    $total_blocks = $object['BlocksTotal'];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }    
+    if($total_blocks == 0) {
+      $m_percent = 0;
+      $c_percent = 0;
+    } else {
+      $m_percent = ($missing_blocks/$total_blocks)*100;
+      $c_percent = ($corrupt_blocks/$total_blocks)*100;
+      break;
+    }
+  }
+  $out_msg = "corrupt_blocks:<" . $corrupt_blocks .
+             ">, missing_blocks:<" . $missing_blocks .
+             ">, total_blocks:<" . $total_blocks . ">";
+
+  if ($m_percent > $crit || $c_percent > $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($m_percent > $warn || $c_percent > $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -s <namenode bean name> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>
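
A hypothetical invocation (the NameNode bean name passed with -s varies by stack
version, so treat it and the port as placeholders):

  ./check_hdfs_blocks.php -h nn1.example.com -p 50070 -w 0% -c 1% -s FSNamesystemMetrics -u false -e false -k /dev/null -r nagios -t /usr/bin/kinit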

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
new file mode 100644
index 0000000..af72723
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hdfs_capacity.php
@@ -0,0 +1,109 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the NameNode, gets the jmx-json document,
+ * and checks whether the % HDFS capacity used is >= the warn and critical limits.
+ * check_jmx -H hostaddress -p port -w 1 -c 1
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $hosts=$options['h'];
+  $port=$options['p'];
+  $warn=$options['w']; $warn = preg_replace('/%$/', '', $warn);
+  $crit=$options['c']; $crit = preg_replace('/%$/', '', $crit);
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  foreach (preg_split('/,/', $hosts) as $host) {
+    /* Get the json document */
+    $ch = curl_init();
+    $username = rtrim(`id -un`, "\n");
+    curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=FSNamesystemState",
+                                  CURLOPT_RETURNTRANSFER => true,
+                                  CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                  CURLOPT_USERPWD => "$username:",
+                                  CURLOPT_SSL_VERIFYPEER => FALSE ));
+    $json_string = curl_exec($ch);
+    $info = curl_getinfo($ch);
+    if (intval($info['http_code']) == 401){
+      logout();
+      $json_string = curl_exec($ch);
+    }
+    $info = curl_getinfo($ch);
+    curl_close($ch);
+    $json_array = json_decode($json_string, true);
+    $percent = 0;
+    $object = $json_array['beans'][0];
+    $CapacityUsed = $object['CapacityUsed'];
+    $CapacityRemaining = $object['CapacityRemaining'];
+    if (count($object) == 0) {
+      echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+      exit(2);
+    }    
+    $CapacityTotal = $CapacityUsed + $CapacityRemaining;
+    if($CapacityTotal == 0) {
+      $percent = 0;
+    } else {
+      $percent = ($CapacityUsed/$CapacityTotal)*100;
+      break;
+    }
+  }
+  $out_msg = "DFSUsedGB:<" . round ($CapacityUsed/(1024*1024*1024),1) .
+             ">, DFSTotalGB:<" . round($CapacityTotal/(1024*1024*1024),1) . ">";
+
+  if ($percent >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($percent >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -w <warn%> -c <crit%> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>
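
Illustrative usage against the NameNode web/JMX port (all values are placeholders):

  ./check_hdfs_capacity.php -h nn1.example.com -p 50070 -w 80% -c 90% -s false -e false -k /dev/null -r nagios -t /usr/bin/kinit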

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
new file mode 100644
index 0000000..640c077
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hive_metastore_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# The URI is of the form thrift://<hostname>:<port>
+HOST=$1
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+HCAT_URL=-Dhive.metastore.uris="thrift://$HOST:$PORT"
+export JAVA_HOME=$JAVA_HOME
+out=`hcat $HCAT_URL -e "show databases" 2>&1`
+if [[ "$?" -ne 0 ]]; then
+  echo "CRITICAL: Error accessing Hive Metastore status [$out]";
+  exit 2;
+fi
+echo "OK: Hive Metastore status OK";
+exit 0;
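
Positional arguments are the metastore host, port, JAVA_HOME and the security flag;
with security enabled the keytab, principal and kinit path follow. For example
(host, port and JAVA_HOME are placeholders):

  ./check_hive_metastore_status.sh hive1.example.com 9083 /usr/jdk64/jdk1.6.0_31 false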

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
new file mode 100644
index 0000000..076d9b3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_hue_status.sh
@@ -0,0 +1,31 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+status=`/etc/init.d/hue status 2>&1`
+
+if [[ "$?" -ne 0 ]]; then
+	echo "WARNING: Hue is stopped";
+	exit 1;
+fi
+
+echo "OK: Hue is running";
+exit 0;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
new file mode 100644
index 0000000..15c85eb
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_mapred_local_dir_used.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+MAPRED_LOCAL_DIRS=$1
+CRITICAL=`echo $2 | cut -d % -f 1`
+IFS=","
+for mapred_dir in $MAPRED_LOCAL_DIRS
+do
+  percent=`df -hl $mapred_dir | awk '{percent=$5;} END{print percent}' | cut -d % -f 1`
+  if [ $percent -ge $CRITICAL ]; then
+    echo "CRITICAL: MapReduce local dir is full."
+    exit 2
+  fi
+done
+echo "OK: MapReduce local dir space is available."
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php
new file mode 100644
index 0000000..186166d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_name_dir_status.php
@@ -0,0 +1,93 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the namenode, gets the jmx-json document,
+ * and checks NameDirStatuses to find any offline (failed) directories
+ * check_jmx -H hostaddress -p port -k keytab path -r principal name -t kinit path -s security enabled
+ */
+ 
+  include "hdp_nagios_init.php";
+
+  $options = getopt("h:p:e:k:r:t:s:");
+  //Check only for mandatory options
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+  
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=NameNode,name=NameNodeInfo",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if ($object['NameDirStatuses'] == "") {
+    echo "WARNING: NameNode directory status not available via ".$protocol."://".$host.":".$port."/jmx url, code " . $info['http_code'] ."\n";
+    exit(1);
+  }
+  $NameDirStatuses = json_decode($object['NameDirStatuses'], true);
+  $failed_dir_count = count($NameDirStatuses['failed']);
+  $out_msg = "CRITICAL: Offline NameNode directories: ";
+  if ($failed_dir_count > 0) {
+    foreach ($NameDirStatuses['failed'] as $key => $value) {
+      $out_msg = $out_msg . $key . ":" . $value . ", ";
+    }
+    echo $out_msg . "\n";
+    exit (2);
+  }
+  echo "OK: All NameNode directories are active" . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled";
+  }
+?>
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh
new file mode 100644
index 0000000..50b075a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_namenodes_ha.sh
@@ -0,0 +1,82 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+IFS=',' read -a namenodes <<< "$1"
+port=$2
+totalNN=${#namenodes[@]}
+activeNN=()
+standbyNN=()
+unavailableNN=()
+
+for nn in "${namenodes[@]}"
+do
+  status=$(curl -m 5 -s http://$nn:$port/jmx?qry=Hadoop:service=NameNode,name=FSNamesystem | grep -i "tag.HAState" | grep -o -E "standby|active")
+  if [ "$status" == "active" ]; then
+    activeNN[${#activeNN[*]}]="$nn"
+  elif [ "$status" == "standby" ]; then
+    standbyNN[${#standbyNN[*]}]="$nn"
+  elif [ "$status" == "" ]; then
+    unavailableNN[${#unavailableNN[*]}]="$nn"
+  fi
+done
+
+message=""
+critical=false
+
+if [ ${#activeNN[@]} -gt 1 ]; then
+  critical=true
+  message=$message" Only one NN can have HAState=active;"
+elif [ ${#activeNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Active NN available;"
+elif [ ${#standbyNN[@]} == 0 ]; then
+  critical=true
+  message=$message" No Standby NN available;"
+fi
+
+NNstats=" Active<"
+for nn in "${activeNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Standby<"
+for nn in "${standbyNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">, Unavailable<"
+for nn in "${unavailableNN[@]}"
+do
+  NNstats="$NNstats$nn;"
+done
+NNstats=${NNstats%\;}
+NNstats=$NNstats">"
+
+if [ $critical == false ]; then
+  echo "OK: NameNode HA healthy;"$NNstats
+  exit 0
+fi
+
+echo "CRITICAL:"$message$NNstats
+exit 2
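
Sample invocation, passing the comma-separated NameNode list and their HTTP port
(hostnames and port are placeholders):

  ./check_namenodes_ha.sh nn1.example.com,nn2.example.com 50070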

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh
new file mode 100644
index 0000000..020b41d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_nodemanager_health.sh
@@ -0,0 +1,44 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+HOST=$1
+PORT=$2
+NODEMANAGER_URL="http://$HOST:$PORT/ws/v1/node/info"
+SEC_ENABLED=$3
+export PATH="/usr/bin:$PATH"
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$4
+  NAGIOS_USER=$5
+  KINIT_PATH=$6
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+
+RESPONSE=`curl --negotiate -u : -s $NODEMANAGER_URL`
+if [[ "$RESPONSE" == *'"nodeHealthy":true'* ]]; then 
+  echo "OK: NodeManager healthy";
+  exit 0;
+fi
+echo "CRITICAL: NodeManager unhealthy";
+exit 2;
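
For an unsecured cluster this is typically invoked as (host and port are placeholders):

  ./check_nodemanager_health.sh nm1.example.com 8042 false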

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh
new file mode 100644
index 0000000..820ee99
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_oozie_status.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+# OOZIE_URL is of the form http://<hostname>:<port>/oozie
+HOST=`echo $1 | tr '[:upper:]' '[:lower:]'`
+PORT=$2
+JAVA_HOME=$3
+SEC_ENABLED=$4
+if [[ "$SEC_ENABLED" == "true" ]]; then
+  NAGIOS_KEYTAB=$5
+  NAGIOS_USER=$6
+  KINIT_PATH=$7
+  out1=`${KINIT_PATH} -kt ${NAGIOS_KEYTAB} ${NAGIOS_USER} 2>&1`
+  if [[ "$?" -ne 0 ]]; then
+    echo "CRITICAL: Error doing kinit for nagios [$out1]";
+    exit 2;
+  fi
+fi
+OOZIE_URL="http://$HOST:$PORT/oozie"
+export JAVA_HOME=$JAVA_HOME
+out=`oozie admin -oozie ${OOZIE_URL} -status 2>&1`
+if [[ "$?" -ne 0 ]]; then 
+  echo "CRITICAL: Error accessing Oozie Server status [$out]";
+  exit 2;
+fi
+echo "OK: Oozie Server status [$out]";
+exit 0;
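
Example (host, port and JAVA_HOME are placeholders):

  ./check_oozie_status.sh oozie1.example.com 11000 /usr/jdk64/jdk1.6.0_31 false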

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php
new file mode 100644
index 0000000..463f69b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/NAGIOS/package/files/check_rpcq_latency.php
@@ -0,0 +1,104 @@
+<?php
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/* This plugin makes a call to the master node, gets the jmx-json document,
+ * and checks the rpc wait time in the queue, RpcQueueTime_avg_time
+ * check_rpcq_latency -h hostaddress -p port -n ServiceName -w 1 -c 1
+ * Warning and Critical values are in seconds
+ * Service Name = JobTracker, NameNode, JobHistoryServer
+ */
+
+  include "hdp_nagios_init.php";
+
+  $options = getopt ("h:p:w:c:n:e:k:r:t:s:");
+  if (!array_key_exists('h', $options) || !array_key_exists('p', $options) || !array_key_exists('w', $options)
+      || !array_key_exists('c', $options) || !array_key_exists('n', $options)) {
+    usage();
+    exit(3);
+  }
+
+  $host=$options['h'];
+  $port=$options['p'];
+  $master=$options['n'];
+  $warn=$options['w'];
+  $crit=$options['c'];
+  $keytab_path=$options['k'];
+  $principal_name=$options['r'];
+  $kinit_path_local=$options['t'];
+  $security_enabled=$options['s'];
+  $ssl_enabled=$options['e'];
+
+  /* Kinit if security enabled */
+  $status = kinit_if_needed($security_enabled, $kinit_path_local, $keytab_path, $principal_name);
+  $retcode = $status[0];
+  $output = $status[1];
+  
+  if ($output != 0) {
+    echo "CRITICAL: Error doing kinit for nagios. $output";
+    exit (2);
+  }
+
+  $protocol = ($ssl_enabled == "true" ? "https" : "http");
+
+
+  /* Get the json document */
+  $ch = curl_init();
+  $username = rtrim(`id -un`, "\n");
+  curl_setopt_array($ch, array( CURLOPT_URL => $protocol."://".$host.":".$port."/jmx?qry=Hadoop:service=".$master.",name=RpcActivityForPort*",
+                                CURLOPT_RETURNTRANSFER => true,
+                                CURLOPT_HTTPAUTH => CURLAUTH_ANY,
+                                CURLOPT_USERPWD => "$username:",
+                                CURLOPT_SSL_VERIFYPEER => FALSE ));
+  $json_string = curl_exec($ch);
+  $info = curl_getinfo($ch);
+  if (intval($info['http_code']) == 401){
+    logout();
+    $json_string = curl_exec($ch);
+  }
+  $info = curl_getinfo($ch);
+  curl_close($ch);
+  $json_array = json_decode($json_string, true);
+  $object = $json_array['beans'][0];
+  if (count($object) == 0) {
+    echo "CRITICAL: Data inaccessible, Status code = ". $info['http_code'] ."\n";
+    exit(2);
+  } 
+  $RpcQueueTime_avg_time = round($object['RpcQueueTime_avg_time'], 2); 
+  $RpcProcessingTime_avg_time = round($object['RpcProcessingTime_avg_time'], 2);
+
+  $out_msg = "RpcQueueTime_avg_time:<" . $RpcQueueTime_avg_time .
+             "> Secs, RpcProcessingTime_avg_time:<" . $RpcProcessingTime_avg_time .
+             "> Secs";
+
+  if ($RpcQueueTime_avg_time >= $crit) {
+    echo "CRITICAL: " . $out_msg . "\n";
+    exit (2);
+  }
+  if ($RpcQueueTime_avg_time >= $warn) {
+    echo "WARNING: " . $out_msg . "\n";
+    exit (1);
+  }
+  echo "OK: " . $out_msg . "\n";
+  exit(0);
+
+  /* print usage */
+  function usage () {
+    echo "Usage: $0 -h <host> -p port -n <JobTracker/NameNode/JobHistoryServer> -w <warn_in_sec> -c <crit_in_sec> -k keytab path -r principal name -t kinit path -s security enabled -e ssl enabled\n";
+  }
+?>
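
An illustrative check of NameNode RPC queue latency via its web/JMX port, warning at
3 seconds and critical at 5 (all values are placeholders):

  ./check_rpcq_latency.php -h nn1.example.com -p 50070 -n NameNode -w 3 -c 5 -s false -e false -k /dev/null -r nagios -t /usr/bin/kinit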


[15/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/log4j.properties.j2
deleted file mode 100644
index db69564..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/log4j.properties.j2
+++ /dev/null
@@ -1,71 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-#
-# ZooKeeper Logging Configuration
-#
-
-# Format is "<default threshold> (, <appender>)+
-
-# DEFAULT: console appender only
-log4j.rootLogger=INFO, CONSOLE
-
-# Example with rolling log file
-#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE
-
-# Example with rolling log file and tracing
-#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE
-
-#
-# Log INFO level and above messages to the console
-#
-log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender
-log4j.appender.CONSOLE.Threshold=INFO
-log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout
-log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-#
-# Add ROLLINGFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender
-log4j.appender.ROLLINGFILE.Threshold=DEBUG
-log4j.appender.ROLLINGFILE.File=zookeeper.log
-
-# Max log file size of 10MB
-log4j.appender.ROLLINGFILE.MaxFileSize=10MB
-# uncomment the next line to limit number of backup files
-#log4j.appender.ROLLINGFILE.MaxBackupIndex=10
-
-log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout
-log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L] - %m%n
-
-
-#
-# Add TRACEFILE to rootLogger to get log file output
-#    Log DEBUG level and above messages to a log file
-log4j.appender.TRACEFILE=org.apache.log4j.FileAppender
-log4j.appender.TRACEFILE.Threshold=TRACE
-log4j.appender.TRACEFILE.File=zookeeper_trace.log
-
-log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout
-### Notice we are including log4j's NDC here (%x)
-log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} - %-5p [%t:%C{1}@%L][%x] - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2
deleted file mode 100644
index 5b68218..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zoo.cfg.j2
+++ /dev/null
@@ -1,51 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-#
-
-# The number of milliseconds of each tick
-tickTime={{tickTime}}
-# The number of ticks that the initial
-# synchronization phase can take
-initLimit={{initLimit}}
-# The number of ticks that can pass between
-# sending a request and getting an acknowledgement
-syncLimit={{syncLimit}}
-# the directory where the snapshot is stored.
-dataDir={{zk_data_dir}}
-# the port at which the clients will connect
-clientPort={{clientPort}}
-{% for host in zookeeper_hosts %}
-server.{{loop.index}}={{host}}:2888:3888
-{% endfor %}
-
-{% if security_enabled %}
-authProvider.1=org.apache.zookeeper.server.auth.SASLAuthenticationProvider
-jaasLoginRenew=3600000
-kerberos.removeHostFromPrincipal=true
-kerberos.removeRealmFromPrincipal=true
-{% endif %}
-
-{% if zoo_cfg_properties_map_length > 0 %}
-# Custom properties
-{% endif %}
-{% for key, value in zoo_cfg_properties_map.iteritems() %}
-{{key}}={{value}}
-{% endfor %}

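For context, the zoo.cfg.j2 template deleted above is re-added unchanged under the 2.1.1 stack by this rename, and is rendered with Jinja2 against values from the service's params module. A minimal rendering sketch, assuming the jinja2 package and using hypothetical host names and properties (the custom-properties loop is simplified to items(), since the template's iteritems() call is Python 2 only):

from jinja2 import Template

# Simplified stand-in for zoo.cfg.j2; all values below are hypothetical.
zoo_cfg = Template(
    "tickTime={{tickTime}}\n"
    "initLimit={{initLimit}}\n"
    "syncLimit={{syncLimit}}\n"
    "dataDir={{zk_data_dir}}\n"
    "clientPort={{clientPort}}\n"
    "{% for host in zookeeper_hosts %}"
    "server.{{loop.index}}={{host}}:2888:3888\n"
    "{% endfor %}"
    "{% for key, value in zoo_cfg_properties_map.items() %}"
    "{{key}}={{value}}\n"
    "{% endfor %}"
)

print(zoo_cfg.render(
    tickTime=2000, initLimit=10, syncLimit=5,
    zk_data_dir="/hadoop/zookeeper", clientPort=2181,
    zookeeper_hosts=["zk1.example.com", "zk2.example.com", "zk3.example.com"],
    zoo_cfg_properties_map={"maxClientCnxns": 60},
))
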
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
deleted file mode 100644
index 493a2a4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper-env.sh.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-export JAVA_HOME={{java64_home}}
-export ZOO_LOG_DIR={{zk_log_dir}}
-export ZOOPIDFILE={{zk_pid_file}}
-export SERVER_JVMFLAGS={{zk_server_heapsize}}
-export JAVA=$JAVA_HOME/bin/java
-export CLASSPATH=$CLASSPATH:/usr/share/zookeeper/*
-
-{% if security_enabled %}
-export SERVER_JVMFLAGS="$SERVER_JVMFLAGS -Djava.security.auth.login.config={{zk_server_jaas_file}}"
-export CLIENT_JVMFLAGS="$CLIENT_JVMFLAGS -Djava.security.auth.login.config={{zk_client_jaas_file}}"
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
deleted file mode 100644
index 696718e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-Client {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=false
-useTicketCache=true;
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
deleted file mode 100644
index aa123e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
+++ /dev/null
@@ -1,8 +0,0 @@
-Server {
-com.sun.security.auth.module.Krb5LoginModule required
-useKeyTab=true
-storeKey=true
-useTicketCache=false
-keyTab="{{zk_keytab_path}}"
-principal="{{zk_principal}}";
-};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh
new file mode 100644
index 0000000..4872a10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/files/changeToSecureUid.sh
@@ -0,0 +1,50 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+
+username=$1
+directories=$2
+
+function find_available_uid() {
+ for ((i=1001; i<=2000; i++))
+ do
+   grep -q $i /etc/passwd
+   if [ "$?" -ne 0 ]
+   then
+    newUid=$i
+    break
+   fi
+ done
+}
+
+find_available_uid
+
+if [ -z "$newUid" ]
+then
+  echo "Failed to find an unused UID between 1001 and 2000"
+  exit 1
+fi
+
+dir_array=($(echo $directories | sed 's/,/\n/g'))
+old_uid=$(id -u $username)
+echo "Changing uid of $username from $old_uid to $newUid"
+echo "Changing directory permisions for ${dir_array[@]}"
+usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
+exit 0

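One note on find_available_uid() above: the bare grep matches the candidate number anywhere in an /etc/passwd line (other IDs, home paths, GECOS), not only in the UID field. A field-aware version of the same search, as a small Python sketch covering the same 1001-2000 range (illustration only; the hook invokes the shell script, not this):

# Sketch: first UID in 1001..2000 that is absent from the UID column of /etc/passwd.
def find_available_uid(passwd_path="/etc/passwd", low=1001, high=2000):
    with open(passwd_path) as passwd:
        used = set()
        for line in passwd:
            fields = line.split(":")
            if len(fields) > 2 and fields[2].isdigit():
                used.add(int(fields[2]))
    for candidate in range(low, high + 1):
        if candidate not in used:
            return candidate
    return None  # mirrors the script's "Failed to find ..." / exit 1 path

if __name__ == "__main__":
    print(find_available_uid())
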
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py
new file mode 100644
index 0000000..51e5cd2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/hook.py
@@ -0,0 +1,36 @@
+##!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+#TODO this must become the "CONFIGURE" hook once the CONFIGURE command is implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_users()
+    install_packages()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py
new file mode 100644
index 0000000..dc6d770
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/params.py
@@ -0,0 +1,84 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#users and groups
+yarn_user = config['configurations']['global']['yarn_user']
+hbase_user = config['configurations']['global']['hbase_user']
+nagios_user = config['configurations']['global']['nagios_user']
+oozie_user = config['configurations']['global']['oozie_user']
+webhcat_user = config['configurations']['global']['hcat_user']
+hcat_user = config['configurations']['global']['hcat_user']
+hive_user = config['configurations']['global']['hive_user']
+smoke_user =  config['configurations']['global']['smokeuser']
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+zk_user = config['configurations']['global']['zk_user']
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+storm_user = config['configurations']['global']['storm_user']
+
+user_group = config['configurations']['global']['user_group']
+proxyuser_group =  config['configurations']['global']['proxyuser_group']
+nagios_group = config['configurations']['global']['nagios_group']
+smoke_user_group =  "users"
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
+
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(hagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+has_storm_server = not len(storm_server_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file

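The has_* flags above all follow the same "not len(x) == 0" pattern over lists pulled from clusterHostInfo. As a plain-Python illustration with a hypothetical payload, truth-testing the list gives the same result and reads a little more directly:

# Hypothetical clusterHostInfo fragment; only the flag-derivation pattern matters here.
cluster_host_info = {
    "rm_host": ["rm1.example.com"],
    "slave_hosts": ["slave1.example.com", "slave2.example.com"],
    "nagios_server_host": [],
}

def hosts(key):
    # stand-in for default("/clusterHostInfo/" + key, []) in this sketch
    return cluster_host_info.get(key, [])

has_resourcemanager = bool(hosts("rm_host"))      # same truth value as: not len(...) == 0
has_slaves = bool(hosts("slave_hosts"))
has_nagios = bool(hosts("nagios_server_host"))

print(has_resourcemanager, has_slaves, has_nagios)   # True True False
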
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py
new file mode 100644
index 0000000..cf6c2c5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-INSTALL/scripts/shared_initialization.py
@@ -0,0 +1,113 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_users():
+  """
+  Creates users before cluster installation
+  """
+  import params
+
+  Group(params.user_group)
+  Group(params.smoke_user_group)
+  Group(params.proxyuser_group)
+  User(params.smoke_user,
+       gid=params.user_group,
+       groups=[params.proxyuser_group]
+  )
+  smoke_user_dirs = format(
+    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
+  set_uid(params.smoke_user, smoke_user_dirs)
+
+  if params.has_hbase_masters:
+    User(params.hbase_user,
+         gid = params.user_group,
+         groups=[params.user_group])
+    hbase_user_dirs = format(
+      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
+    set_uid(params.hbase_user, hbase_user_dirs)
+
+  if params.has_nagios:
+    Group(params.nagios_group)
+    User(params.nagios_user,
+         gid=params.nagios_group)
+
+  if params.has_oozie_server:
+    User(params.oozie_user,
+         gid = params.user_group)
+
+  if params.has_hcat_server_host:
+    User(params.webhcat_user,
+         gid = params.user_group)
+    User(params.hcat_user,
+         gid = params.user_group)
+
+  if params.has_hive_server_host:
+    User(params.hive_user,
+         gid = params.user_group)
+
+  if params.has_resourcemanager:
+    User(params.yarn_user,
+         gid = params.user_group)
+
+  if params.has_ganglia_server:
+    Group(params.gmetad_user)
+    Group(params.gmond_user)
+    User(params.gmond_user,
+         gid=params.user_group,
+        groups=[params.gmond_user])
+    User(params.gmetad_user,
+         gid=params.user_group,
+        groups=[params.gmetad_user])
+
+  User(params.hdfs_user,
+        gid=params.user_group,
+        groups=[params.user_group]
+  )
+  User(params.mapred_user,
+       gid=params.user_group,
+       groups=[params.user_group]
+  )
+  if params.has_zk_host:
+    User(params.zk_user,
+         gid=params.user_group)
+
+  if params.has_storm_server:
+    User(params.storm_user,
+         gid=params.user_group,
+         groups=[params.user_group]
+    )
+
+def set_uid(user, user_dirs):
+  """
+  user_dirs - comma separated directories
+  """
+  File("/tmp/changeUid.sh",
+       content=StaticFile("changeToSecureUid.sh"),
+       mode=0555)
+  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
+          not_if = format("test $(id -u {user}) -gt 1000"))
+
+def install_packages():
+  Package("unzip")
+  Package("net-snmp")
+  Package("net-snmp-utils")
\ No newline at end of file

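set_uid() above stages changeToSecureUid.sh as /tmp/changeUid.sh and runs it behind not_if = "test $(id -u {user}) -gt 1000", so the UID is only rewritten for users that still sit at or below 1000. A rough plain-Python sketch of that guard-and-run flow (user name and directories are hypothetical; the hook itself does this through the Execute resource):

import subprocess

def change_to_secure_uid(user, user_dirs):
    # Skip when the user's UID is already above 1000 (the not_if guard).
    current_uid = int(subprocess.check_output(["id", "-u", user]).strip())
    if current_uid > 1000:
        return
    # Otherwise hand off to the staged script: changeUid.sh <user> <comma-separated dirs>
    subprocess.check_call(["/tmp/changeUid.sh", user, user_dirs])

# Example call with hypothetical values:
# change_to_secure_uid("ambari-qa", "/tmp/hadoop-ambari-qa,/home/ambari-qa")
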
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh
new file mode 100644
index 0000000..d14091a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/checkForFormat.sh
@@ -0,0 +1,62 @@
+#!/bin/sh
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+export hdfs_user=$1
+shift
+export conf_dir=$1
+shift
+export mark_dir=$1
+shift
+export name_dirs=$*
+
+export EXIT_CODE=0
+export command="namenode -format"
+export list_of_non_empty_dirs=""
+
+mark_file=/var/run/hadoop/hdfs/namenode-formatted
+if [[ -f ${mark_file} ]] ; then
+  rm -f ${mark_file}
+  mkdir -p ${mark_dir}
+fi
+
+if [[ ! -d $mark_dir ]] ; then
+  for dir in `echo $name_dirs | tr ',' ' '` ; do
+    echo "NameNode Dirname = $dir"
+    cmd="ls $dir | wc -l  | grep -q ^0$"
+    eval $cmd
+    if [[ $? -ne 0 ]] ; then
+      (( EXIT_CODE = $EXIT_CODE + 1 ))
+      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
+    fi
+  done
+
+  if [[ $EXIT_CODE == 0 ]] ; then
+    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
+  else
+    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
+  fi
+else
+  echo "${mark_dir} exists. Namenode DFS already formatted"
+fi
+
+exit $EXIT_CODE
+

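checkForFormat.sh above only runs "hadoop namenode -format" when the marker directory is absent and every NameNode data directory is empty. The emptiness check (the ls | wc -l | grep ^0$ loop) boils down to something like this Python sketch, with hypothetical paths:

import os

def non_empty_dirs(name_dirs):
    """name_dirs: comma-separated NameNode directories, as passed to the script."""
    bad = []
    for d in name_dirs.split(","):
        d = d.strip()
        if d and os.path.isdir(d) and os.listdir(d):
            bad.append(d)
    return bad

# Example with hypothetical paths:
# print(non_empty_dirs("/hadoop/hdfs/namenode,/mnt/hdfs/namenode"))
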
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties
new file mode 100644
index 0000000..c8939fc
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/files/task-log4j.properties
@@ -0,0 +1,132 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootlogger above if you want to use this 
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+# Rolling File Appender
+#
+
+#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and 30-day backups
+#log4j.appender.RFA.MaxFileSize=1MB
+#log4j.appender.RFA.MaxBackupIndex=30
+
+#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py
new file mode 100644
index 0000000..e11bfac
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/hook.py
@@ -0,0 +1,37 @@
+##!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from shared_initialization import *
+
+#TODO this must become the "CONFIGURE" hook once the CONFIGURE command is implemented
+class BeforeConfigureHook(Hook):
+
+  def hook(self, env):
+    import params
+
+    env.set_params(params)
+    setup_java()
+    setup_hadoop()
+    setup_configs()
+
+if __name__ == "__main__":
+  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py
new file mode 100644
index 0000000..712a5ab
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/params.py
@@ -0,0 +1,172 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+import os
+
+config = Script.get_config()
+
+#java params
+artifact_dir = "/tmp/HDP-artifacts/"
+jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
+jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
+jce_location = config['hostLevelParams']['jdk_location']
+jdk_location = config['hostLevelParams']['jdk_location']
+#security params
+security_enabled = config['configurations']['global']['security_enabled']
+dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
+dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
+dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
+dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
+
+dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
+dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
+dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
+dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
+dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
+dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
+dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
+
+#users and groups
+mapred_user = config['configurations']['global']['mapred_user']
+hdfs_user = config['configurations']['global']['hdfs_user']
+yarn_user = config['configurations']['global']['yarn_user']
+
+user_group = config['configurations']['global']['user_group']
+mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
+
+#snmp
+snmp_conf_dir = "/etc/snmp/"
+snmp_source = "0.0.0.0/0"
+snmp_community = "hadoop"
+
+#hosts
+hostname = config["hostname"]
+rm_host = default("/clusterHostInfo/rm_host", [])
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
+oozie_servers = default("/clusterHostInfo/oozie_server", [])
+hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
+hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
+ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
+
+has_resourcemanager = not len(rm_host) == 0
+has_slaves = not len(slave_hosts) == 0
+has_nagios = not len(hagios_server_hosts) == 0
+has_oozie_server = not len(oozie_servers)  == 0
+has_hcat_server_host = not len(hcat_server_hosts)  == 0
+has_hive_server_host = not len(hive_server_host)  == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_zk_host = not len(zk_hosts) == 0
+has_ganglia_server = not len(ganglia_server_hosts) == 0
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+if has_ganglia_server:
+  ganglia_server_host = ganglia_server_hosts[0]
+#hadoop params
+hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
+hadoop_lib_home = "/usr/lib/hadoop/lib"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
+hadoop_home = "/usr"
+hadoop_bin = "/usr/lib/hadoop/sbin"
+
+task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
+limits_conf_dir = "/etc/security/limits.d"
+
+hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
+hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
+#db params
+server_db_name = config['hostLevelParams']['db_name']
+db_driver_filename = config['hostLevelParams']['db_driver_filename']
+oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
+mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
+
+ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
+ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
+ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
+ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
+
+rca_enabled = config['configurations']['global']['rca_enabled']
+rca_disabled_prefix = "###"
+if rca_enabled == True:
+  rca_prefix = ""
+else:
+  rca_prefix = rca_disabled_prefix
+
+#hadoop-env.sh
+java_home = config['hostLevelParams']['java_home']
+if System.get_instance().platform == "suse":
+  jsvc_path = "/usr/lib/bigtop-utils"
+else:
+  jsvc_path = "/usr/libexec/bigtop-utils"
+
+hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
+namenode_heapsize = config['configurations']['global']['namenode_heapsize']
+namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
+namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
+
+jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
+jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
+jtnode_heapsize =  default("jtnode_heapsize","1024m")
+ttnode_heapsize = "1024m"
+
+dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
+mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
+mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
+hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
+mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
+
+#taskcontroller.cfg
+
+mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
+
+#log4j.properties
+
+yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
+
+#hdfs ha properties
+dfs_ha_enabled = False
+dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
+dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
+if dfs_ha_namenode_ids:
+  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
+  if dfs_ha_namenode_ids_array_len > 1:
+    dfs_ha_enabled = True
+
+namenode_id = None
+if dfs_ha_enabled:
+  for nn_id in dfs_ha_namenode_ids.split(","):
+    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
+    if hostname in nn_host:
+      namenode_id = nn_id
+
+dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
\ No newline at end of file

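params.py above reads optional values through the resource_management default() helper with '/'-separated paths (default("/clusterHostInfo/rm_host", []) and so on). Roughly, that lookup walks the command JSON and falls back when a key is missing; a hedged stand-in in plain Python (the real helper lives in resource_management and may differ in detail):

def default_lookup(config, path, fallback):
    # Walk nested dicts by '/'-separated path; return fallback on any missing key.
    node = config
    for part in path.strip("/").split("/"):
        if not isinstance(node, dict) or part not in node:
            return fallback
        node = node[part]
    return node

# Hypothetical command JSON fragment:
config = {"clusterHostInfo": {"rm_host": ["rm1.example.com"]}}
print(default_lookup(config, "/clusterHostInfo/rm_host", []))       # ['rm1.example.com']
print(default_lookup(config, "/clusterHostInfo/oozie_server", []))  # []
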
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py
new file mode 100644
index 0000000..f2644aa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/scripts/shared_initialization.py
@@ -0,0 +1,327 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import os
+
+from resource_management import *
+
+def setup_java():
+  """
+  Installs the JDK using params that come from ambari-server
+  """
+  import params
+
+  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
+  java_dir = os.path.dirname(params.java_home)
+  java_exec = format("{java_home}/bin/java")
+  
+  if not params.jdk_name:
+    return
+  
+  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}"))
+
+  if params.jdk_name.endswith(".bin"):
+    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
+  elif params.jdk_name.endswith(".gz"):
+    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
+  
+  Execute(install_cmd,
+          path = ["/bin","/usr/bin/"],
+          not_if = format("test -e {java_exec}")
+  )
+  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
+  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
+  Execute( download_jce,
+        path = ["/bin","/usr/bin/"],
+        not_if =format("test -e {jce_curl_target}"),
+        ignore_failures = True
+  )
+  
+  if params.security_enabled:
+    security_dir = format("{java_home}/jre/lib/security")
+    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
+    Execute(extract_cmd,
+          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
+          cwd  = security_dir,
+          path = ['/bin/','/usr/bin']
+    )
+
+def setup_hadoop():
+  """
+  Setup hadoop files and directories
+  """
+  import params
+
+  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
+       content=Template("snmpd.conf.j2"))
+  Service("snmpd",
+          action = "restart")
+
+  Execute("/bin/echo 0 > /selinux/enforce",
+          only_if="test -f /selinux/enforce"
+  )
+
+  install_snappy()
+
+  #directories
+  Directory(params.hadoop_conf_dir,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hdfs_log_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  Directory(params.hadoop_pid_dir_prefix,
+            recursive=True,
+            owner='root',
+            group='root'
+  )
+  #this isn't needed with stack 1
+  Directory(os.path.dirname(params.hadoop_tmp_dir),
+            recursive=True,
+            owner=params.hdfs_user,
+            )
+  #files
+  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
+       owner='root',
+       group='root',
+       mode=0644,
+       content=Template("hdfs.conf.j2")
+  )
+  if params.security_enabled:
+    File(os.path.join(params.hadoop_bin, "task-controller"),
+         owner="root",
+         group=params.mapred_tt_group,
+         mode=06050
+    )
+    tc_mode = 0644
+    tc_owner = "root"
+  else:
+    tc_mode = None
+    tc_owner = params.hdfs_user
+
+  if tc_mode:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner = tc_owner,
+         mode = tc_mode,
+         group = params.mapred_tt_group,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  else:
+    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
+         owner=tc_owner,
+         content=Template("taskcontroller.cfg.j2")
+    )
+  for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
+    File(os.path.join(params.hadoop_conf_dir, file),
+         owner=tc_owner,
+         content=Template(file + ".j2")
+    )
+
+  health_check_template = "health_check-v2" #for stack 1 use 'health_check'
+  File(os.path.join(params.hadoop_conf_dir, "health_check"),
+       owner=tc_owner,
+       content=Template(health_check_template + ".j2")
+  )
+
+  File(os.path.join(params.hadoop_conf_dir, "log4j.properties"),
+       owner=params.hdfs_user,
+       content=Template("log4j.properties.j2")
+  )
+
+  update_log4j_props(os.path.join(params.hadoop_conf_dir, "log4j.properties"))
+
+  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
+       owner=params.hdfs_user,
+       content=Template("hadoop-metrics2.properties.j2")
+  )
+
+  db_driver_dload_cmd = ""
+  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
+    db_driver_dload_cmd = format(
+      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
+
+  if db_driver_dload_cmd:
+    Execute(db_driver_dload_cmd,
+            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
+    )
+
+
+def setup_configs():
+  """
+  Creates configs for the HDFS and MapReduce services
+  """
+  import params
+
+  if "mapred-queue-acls" in params.config['configurations']:
+    XmlConfig("mapred-queue-acls.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'mapred-queue-acls'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+  elif os.path.exists(
+      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
+    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  if "hadoop-policy" in params.config['configurations']:
+    XmlConfig("hadoop-policy.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['hadoop-policy'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("core-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['core-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  if "mapred-site" in params.config['configurations']:
+    XmlConfig("mapred-site.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations']['mapred-site'],
+              owner=params.mapred_user,
+              group=params.user_group
+    )
+
+  File(params.task_log4j_properties_location,
+       content=StaticFile("task-log4j.properties"),
+       mode=0755
+  )
+
+  if "capacity-scheduler" in params.config['configurations']:
+    XmlConfig("capacity-scheduler.xml",
+              conf_dir=params.hadoop_conf_dir,
+              configurations=params.config['configurations'][
+                'capacity-scheduler'],
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+
+  XmlConfig("hdfs-site.xml",
+            conf_dir=params.hadoop_conf_dir,
+            configurations=params.config['configurations']['hdfs-site'],
+            owner=params.hdfs_user,
+            group=params.user_group
+  )
+
+  # if params.stack_version[0] == "1":
+  #   Link('/usr/lib/hadoop/hadoop-tools.jar',
+  #         to = '/usr/lib/hadoop/lib/hadoop-tools.jar',
+  #         mode = 0755
+  #   )
+
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
+    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
+    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
+    File(os.path.join(params.hadoop_conf_dir, 'masters'),
+              owner=params.hdfs_user,
+              group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+  if os.path.exists(
+      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
+    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
+         owner=params.mapred_user,
+         group=params.user_group
+    )
+
+  generate_include_file()
+
+def update_log4j_props(file):
+  import params
+
+  property_map = {
+    'ambari.jobhistory.database': params.ambari_db_rca_url,
+    'ambari.jobhistory.driver': params.ambari_db_rca_driver,
+    'ambari.jobhistory.user': params.ambari_db_rca_username,
+    'ambari.jobhistory.password': params.ambari_db_rca_password,
+    'ambari.jobhistory.logger': 'DEBUG,JHA',
+
+    'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
+    'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
+    'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
+    'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
+    'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
+
+    'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
+    'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
+  }
+  for key in property_map:
+    value = property_map[key]
+    Execute(format(
+      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
+
+
+def generate_include_file():
+  import params
+
+  if params.dfs_hosts and params.has_slaves:
+    include_hosts_list = params.slave_hosts
+    File(params.dfs_hosts,
+         content=Template("include_hosts_list.j2"),
+         owner=params.hdfs_user,
+         group=params.user_group
+    )
+
+
+def install_snappy():
+  import params
+
+  snappy_so = "libsnappy.so"
+  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
+  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
+  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
+  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
+  so_src_dir_x86 = format("{hadoop_home}/lib")
+  so_src_dir_x64 = format("{hadoop_home}/lib64")
+  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
+  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
+  Execute(
+    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
+  Execute(
+    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))

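update_log4j_props() above rewrites the RCA-related keys in log4j.properties with one sed call per property, prepending or stripping the "###" prefix according to rca_enabled. The same rewrite in pure Python, as a sketch with a trimmed property map and hypothetical values:

import re

def update_log4j_props(path, property_map, rca_prefix="", disabled_prefix="###"):
    with open(path) as props:
        text = props.read()
    for key, value in property_map.items():
        # Replace "key=..." lines, whether or not they are currently prefixed with "###".
        pattern = re.compile(r"^(?:%s)?%s=.*$" % (re.escape(disabled_prefix), re.escape(key)), re.M)
        text = pattern.sub("%s%s=%s" % (rca_prefix, key, value), text)
    with open(path, "w") as props:
        props.write(text)

# Example (hypothetical path and values); rca_prefix="###" corresponds to rca_enabled being false:
# update_log4j_props("/etc/hadoop/conf/log4j.properties",
#                    {"ambari.jobhistory.user": "mapred",
#                     "ambari.jobhistory.logger": "DEBUG,JHA"},
#                    rca_prefix="###")
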
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2
new file mode 100644
index 0000000..77e458f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/commons-logging.properties.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#Logging Implementation
+
+#Log4J
+org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
+
+#JDK Logger
+#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2
new file mode 100644
index 0000000..bb5795b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/exclude_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in hdfs_exclude_file %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2
new file mode 100644
index 0000000..7d10cc3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-env.sh.j2
@@ -0,0 +1,121 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Set Hadoop-specific environment variables here.
+
+# The only required environment variable is JAVA_HOME.  All others are
+# optional.  When running a distributed configuration it is best to
+# set JAVA_HOME in this file, so that it is correctly defined on
+# remote nodes.
+
+# The java implementation to use.  Required.
+export JAVA_HOME={{java_home}}
+export HADOOP_HOME_WARN_SUPPRESS=1
+
+# Hadoop Configuration Directory
+#TODO: if env var set that can cause problems
+export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
+
+{# this is different for HDP1 #}
+# Path to jsvc required by secure HDP 2.0 datanode
+export JSVC_HOME={{jsvc_path}}
+
+
+# The maximum amount of heap to use, in MB. Default is 1000.
+export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
+
+export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
+
+# Extra Java runtime options.  Empty by default.
+export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
+
+# Command specific options appended to HADOOP_OPTS when specified
+export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
+HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
+
+HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
+HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
+HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
+
+export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
+
+# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
+export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
+# On secure datanodes, user to run the datanode as after dropping privileges
+export HADOOP_SECURE_DN_USER={{hdfs_user}}
+
+# Extra ssh options.  Empty by default.
+export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
+
+# Where log files are stored.  $HADOOP_HOME/logs by default.
+export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
+
+# History server logs
+export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
+
+# Where log files are stored in the secure data environment.
+export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
+# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
+
+# host:path where hadoop code should be rsync'd from.  Unset by default.
+# export HADOOP_MASTER=master:/home/$USER/src/hadoop
+
+# Seconds to sleep between slave commands.  Unset by default.  This
+# can be useful in large clusters, where, e.g., slave rsyncs can
+# otherwise arrive faster than the master can service them.
+# export HADOOP_SLAVE_SLEEP=0.1
+
+# The directory where pid files are stored. /tmp by default.
+export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
+export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
+
+# History server pid
+export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
+
+YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
+
+# A string representing this instance of hadoop. $USER by default.
+export HADOOP_IDENT_STRING=$USER
+
+# The scheduling priority for daemon processes.  See 'man nice'.
+
+# export HADOOP_NICENESS=10
+
+# Use libraries from standard classpath
+JAVA_JDBC_LIBS=""
+#Add libraries required by mysql connector
+for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by oracle connector
+for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
+do
+  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
+done
+#Add libraries required by nodemanager
+MAPREDUCE_LIBS={{mapreduce_libs_path}}
+export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
+
+# Setting path to hdfs command line
+export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
+
+#Mostly required for hadoop 2.0
+export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2
new file mode 100644
index 0000000..a6a66ef
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hadoop-metrics2.properties.j2
@@ -0,0 +1,45 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# syntax: [prefix].[source|sink|jmx].[instance].[options]
+# See package.html for org.apache.hadoop.metrics2 for details
+
+{% if has_ganglia_server %}
+*.period=60
+
+*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
+*.sink.ganglia.period=10
+
+# default for supportsparse is false
+*.sink.ganglia.supportsparse=true
+
+.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
+.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
+
+# Hook up to the server
+namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
+datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
+jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
+tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
+maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
+reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
+resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
+nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
+historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
+journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
+
+resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
+
+{% endif %}
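
A minimal sketch of rendering this template outside Ambari, assuming the jinja2 package is available and the template is saved locally; the flag and host name are made-up example values rather than anything defined by this patch:

    from jinja2 import Template

    with open("hadoop-metrics2.properties.j2") as f:
        template = Template(f.read())

    # With has_ganglia_server set to False the rendered file is effectively empty.
    print(template.render(has_ganglia_server=True,
                          ganglia_server_host="ganglia.example.com"))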

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2
new file mode 100644
index 0000000..ca7baa2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/hdfs.conf.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+{{hdfs_user}}   - nofile 32768
+{{hdfs_user}}   - nproc  65536
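
The two limits above raise the open-file and process caps for the HDFS user via the system limits configuration. A quick way to confirm which limits a running process actually inherits, shown as a plain Python sketch that is not part of the patch:

    import resource

    # (soft, hard) limits seen by the current process.
    print("nofile:", resource.getrlimit(resource.RLIMIT_NOFILE))
    print("nproc: ", resource.getrlimit(resource.RLIMIT_NPROC))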

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2
new file mode 100644
index 0000000..cb7b12b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check-v2.j2
@@ -0,0 +1,91 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks ; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0
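
The script prints a single line prefixed with "OK:" or "ERROR" and then exits. A hypothetical illustration of how a caller might run it and interpret the result; Ambari's real agent-side handling is not shown in this patch:

    import subprocess

    result = subprocess.run(["bash", "health_check"], capture_output=True, text=True)
    line = result.stdout.strip()
    if result.returncode == 0 and line.startswith("OK:"):
        print("host looks healthy:", line)
    else:
        print("host check reported a problem:", line or result.stderr.strip())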

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2
new file mode 100644
index 0000000..b84b336
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/health_check.j2
@@ -0,0 +1,118 @@
+#!/bin/bash
+#
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+err=0;
+
+function check_disks {
+
+  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
+    fsdev=""
+    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
+    if [ -z "$fsdev" ] ; then
+      msg_="$msg_ $m(u)"
+    else
+      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
+    fi
+  done
+
+  if [ -z "$msg_" ] ; then
+    echo "disks ok" ; exit 0
+  else
+    echo "$msg_" ; exit 2
+  fi
+
+}
+
+function check_taskcontroller {
+  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
+    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
+    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
+      echo "taskcontroller ok"
+    else
+      echo 'check taskcontroller' ; exit 1
+    fi
+  fi
+}
+
+function check_jetty {
+  hname=`hostname`
+  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
+  if [ $? -eq 0 ] ; then
+    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
+    e=${e:-0} # no jmx servlet ?
+    if [ $e -gt 10 ] ; then
+      echo "check jetty: shuffle_exceptions=$e" ; exit 1
+    else
+      echo "jetty ok"
+    fi
+  else
+    echo "check jetty: ping failed" ; exit 1
+  fi
+}
+
+function check_link {
+  snmp=/usr/bin/snmpwalk
+  if [ -e $snmp ] ; then
+    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
+    awk ' {
+      split($1,a,".") ;
+      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
+      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
+      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
+      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
+      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
+      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
+    }
+    END {
+      up=0;
+      for (i in ifIndex ) {
+      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
+      up=i;
+      }
+      }
+      if ( up == 0 ) { print "check link" ; exit 2 }
+      else { print ifDescr[up],"ok" }
+    }'
+    exit $? ;
+  fi
+}
+
+# Run all checks
+# Disabled 'check_link' for now... 
+for check in disks taskcontroller jetty; do
+  msg=`check_${check}` ;
+  if [ $? -eq 0 ] ; then
+    ok_msg="$ok_msg$msg,"
+  else
+    err_msg="$err_msg$msg,"
+  fi
+done
+
+if [ ! -z "$err_msg" ] ; then
+  echo -n "ERROR $err_msg "
+fi
+if [ ! -z "$ok_msg" ] ; then
+  echo -n "OK: $ok_msg"
+fi
+
+echo
+
+# Success!
+exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/include_hosts_list.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/log4j.properties.j2
new file mode 100644
index 0000000..6c02292
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/log4j.properties.j2
@@ -0,0 +1,218 @@
+# Copyright 2011 The Apache Software Foundation
+# 
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Define some default values that can be overridden by system properties
+hadoop.root.logger=INFO,console
+hadoop.log.dir=.
+hadoop.log.file=hadoop.log
+
+
+# Define the root logger to the system property "hadoop.root.logger".
+log4j.rootLogger=${hadoop.root.logger}, EventCounter
+
+# Logging Threshold
+log4j.threshhold=ALL
+
+#
+# Daily Rolling File Appender
+#
+
+log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Rollover at midnight
+log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
+
+# 30-day backup
+#log4j.appender.DRFA.MaxBackupIndex=30
+log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
+
+# Pattern format: Date LogLevel LoggerName LogMessage
+log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+# Debugging Pattern format
+#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+#
+# console
+# Add "console" to rootLogger above if you want to use this
+#
+
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.target=System.err
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+
+#
+# TaskLog Appender
+#
+
+#Default values
+hadoop.tasklog.taskid=null
+hadoop.tasklog.iscleanup=false
+hadoop.tasklog.noKeepSplits=4
+hadoop.tasklog.totalLogFileSize=100
+hadoop.tasklog.purgeLogSplits=true
+hadoop.tasklog.logsRetainHours=12
+
+log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
+log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
+log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
+log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
+
+log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
+log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
+#
+#Security audit appender
+#
+hadoop.security.logger=INFO,console
+hadoop.security.log.maxfilesize=256MB
+hadoop.security.log.maxbackupindex=20
+log4j.category.SecurityLogger=${hadoop.security.logger}
+hadoop.security.log.file=SecurityAuth.audit
+log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
+log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
+
+log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
+log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
+log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
+
+#
+# hdfs audit logging
+#
+hdfs.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
+log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
+log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
+log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# mapred audit logging
+#
+mapred.audit.logger=INFO,console
+log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
+log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
+log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+
+#
+# Rolling File Appender
+#
+
+log4j.appender.RFA=org.apache.log4j.RollingFileAppender
+log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
+
+# Logfile size and number of rolled backups
+log4j.appender.RFA.MaxFileSize=256MB
+log4j.appender.RFA.MaxBackupIndex=10
+
+log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
+log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
+
+
+# Custom Logging levels
+
+hadoop.metrics.log.level=INFO
+#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
+#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
+#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
+log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
+
+# Jets3t library
+log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
+
+#
+# Null Appender
+# Trap security logger on the hadoop client side
+#
+log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
+
+#
+# Event Counter Appender
+# Sends counts of logging messages at different severity levels to Hadoop Metrics.
+#
+log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
+
+{% if is_jtnode_master or is_rmnode_master %}
+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
+log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
+# Set the ResourceManager summary log filename
+yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
+# Set the ResourceManager summary log level and appender
+yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
+#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
+
+# To enable AppSummaryLogging for the RM,
+# set yarn.server.resourcemanager.appsummary.logger to
+# <LEVEL>,RMSUMMARY in hadoop-env.sh
+
+# Appender for ResourceManager Application Summary Log
+# Requires the following properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
+#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
+log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
+log4j.appender.RMSUMMARY.File={{yarn_log_dir_prefix}}/{{yarn_user}}/${yarn.server.resourcemanager.appsummary.log.file}
+log4j.appender.RMSUMMARY.MaxFileSize=256MB
+log4j.appender.RMSUMMARY.MaxBackupIndex=20
+log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
+log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
+log4j.appender.JSA.DatePattern=.yyyy-MM-dd
+log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
+log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
+log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
+{% endif %}
+
+{{rca_prefix}}ambari.jobhistory.database={{ambari_db_rca_url}}
+{{rca_prefix}}ambari.jobhistory.driver={{ambari_db_rca_driver}}
+{{rca_prefix}}ambari.jobhistory.user={{ambari_db_rca_username}}
+{{rca_prefix}}ambari.jobhistory.password={{ambari_db_rca_password}}
+{{rca_prefix}}ambari.jobhistory.logger=DEBUG,JHA
+
+{{rca_prefix}}log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
+{{rca_prefix}}log4j.appender.JHA.database=${ambari.jobhistory.database}
+{{rca_prefix}}log4j.appender.JHA.driver=${ambari.jobhistory.driver}
+{{rca_prefix}}log4j.appender.JHA.user=${ambari.jobhistory.user}
+{{rca_prefix}}log4j.appender.JHA.password=${ambari.jobhistory.password}
+
+{{rca_prefix}}log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
+{{rca_prefix}}log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2
new file mode 100644
index 0000000..cbcf6c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/slaves.j2
@@ -0,0 +1,3 @@
+{% for host in slave_hosts %}
+{{host}}
+{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2
new file mode 100644
index 0000000..3530444
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/snmpd.conf.j2
@@ -0,0 +1,48 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
+group   notConfigGroup v1           notConfigUser
+group   notConfigGroup v2c           notConfigUser
+view    systemview    included   .1
+access  notConfigGroup ""      any       noauth    exact  systemview none none
+
+syslocation Hadoop 
+syscontact HadoopMaster 
+dontLogTCPWrappersConnects yes
+
+###############################################################################
+# disk checks
+
+disk / 10000
+
+
+###############################################################################
+# load average checks
+#
+
+# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
+#
+# 1MAX:   If the 1 minute load average is above this limit at query
+#         time, the errorFlag will be set.
+# 5MAX:   Similar, but for 5 min average.
+# 15MAX:  Similar, but for 15 min average.
+
+# Check for loads:
+#load 12 14 14
+

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2
new file mode 100644
index 0000000..d01d37e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/hooks/before-START/templates/taskcontroller.cfg.j2
@@ -0,0 +1,20 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+mapred.local.dir={{mapred_local_dir}}
+mapreduce.tasktracker.group={{mapred_tt_group}}
+hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml
new file mode 100644
index 0000000..ca45822
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/metainfo.xml
@@ -0,0 +1,22 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+    <versions>
+	  <active>true</active>
+    </versions>
+</metainfo>


[29/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
new file mode 100644
index 0000000..c70449d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_client_jaas.conf.j2
@@ -0,0 +1,22 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Client {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=false
+useTicketCache=true;
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
new file mode 100644
index 0000000..639cdaa
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/1.3.3/services/ZOOKEEPER/package/templates/zookeeper_jaas.conf.j2
@@ -0,0 +1,25 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+Server {
+com.sun.security.auth.module.Krb5LoginModule required
+useKeyTab=true
+storeKey=true
+useTicketCache=false
+keyTab="{{zk_keytab_path}}"
+principal="{{zk_principal}}";
+};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.6/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.6/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.6/repos/repoinfo.xml
index 09f1834..ec62810 100644
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.6/repos/repoinfo.xml
+++ b/ambari-server/src/main/resources/stacks/HDP/2.0.6/repos/repoinfo.xml
@@ -18,56 +18,56 @@
 <reposinfo>
   <os type="centos6">
     <repo>
-      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>
   </os>
   <os type="centos5">
     <repo>
-      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>
   </os>
   <os type="redhat6">
     <repo>
-      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>
   </os>
   <os type="redhat5">
     <repo>
-      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>
   </os>
    <os type="oraclelinux6">
     <repo>
-      <baseurl>REPLACE_WITH_CENTOS6_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>
   </os>
   <os type="oraclelinux5">
     <repo>
-      <baseurl>REPLACE_WITH_CENTOS5_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>
   </os>
   <os type="suse11">
     <repo>
-      <baseurl>REPLACE_WITH_SUSE11_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>
   </os>
   <os type="sles11">
     <repo>
-      <baseurl>REPLACE_WITH_SUSE11_URL</baseurl>
+      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
       <repoid>HDP-2.0.6</repoid>
       <reponame>HDP</reponame>
     </repo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/files/changeToSecureUid.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/files/changeToSecureUid.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/files/changeToSecureUid.sh
deleted file mode 100644
index 4872a10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/files/changeToSecureUid.sh
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-
-username=$1
-directories=$2
-
-function find_available_uid() {
- for ((i=1001; i<=2000; i++))
- do
-   grep -q $i /etc/passwd
-   if [ "$?" -ne 0 ]
-   then
-    newUid=$i
-    break
-   fi
- done
-}
-
-find_available_uid
-
-if [ $newUid -eq 0 ]
-then
-  echo "Failed to find Uid between 1000 and 2000"
-  exit 1
-fi
-
-dir_array=($(echo $directories | sed 's/,/\n/g'))
-old_uid=$(id -u $username)
-echo "Changing uid of $username from $old_uid to $newUid"
-echo "Changing directory permisions for ${dir_array[@]}"
-usermod -u $newUid $username && for dir in ${dir_array[@]} ; do chown -Rh $newUid $dir ; done
-exit 0
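
The deleted script scanned /etc/passwd for an unused uid between 1001 and 2000 before reassigning the user and re-owning its directories. A rough Python equivalent of just the uid scan, for illustration (it checks the numeric uid field, which is slightly stricter than the shell version's grep over whole passwd lines):

    import pwd

    def find_available_uid(start=1001, end=2000):
        used = set(entry.pw_uid for entry in pwd.getpwall())
        for uid in range(start, end + 1):
            if uid not in used:
                return uid
        return None

    print(find_available_uid())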

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/hook.py
deleted file mode 100644
index 51e5cd2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/hook.py
+++ /dev/null
@@ -1,36 +0,0 @@
-##!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_users()
-    install_packages()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/params.py
deleted file mode 100644
index dc6d770..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/params.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-zk_user = config['configurations']['global']['zk_user']
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-storm_user = config['configurations']['global']['storm_user']
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group =  "users"
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-storm_server_hosts = default("/clusterHostInfo/nimbus_hosts", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_storm_server = not len(storm_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/shared_initialization.py
deleted file mode 100644
index cf6c2c5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-INSTALL/scripts/shared_initialization.py
+++ /dev/null
@@ -1,113 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_users():
-  """
-  Creates users before cluster installation
-  """
-  import params
-
-  Group(params.user_group)
-  Group(params.smoke_user_group)
-  Group(params.proxyuser_group)
-  User(params.smoke_user,
-       gid=params.user_group,
-       groups=[params.proxyuser_group]
-  )
-  smoke_user_dirs = format(
-    "/tmp/hadoop-{smoke_user},/tmp/hsperfdata_{smoke_user},/home/{smoke_user},/tmp/{smoke_user},/tmp/sqoop-{smoke_user}")
-  set_uid(params.smoke_user, smoke_user_dirs)
-
-  if params.has_hbase_masters:
-    User(params.hbase_user,
-         gid = params.user_group,
-         groups=[params.user_group])
-    hbase_user_dirs = format(
-      "/home/{hbase_user},/tmp/{hbase_user},/usr/bin/{hbase_user},/var/log/{hbase_user},{hbase_tmp_dir}")
-    set_uid(params.hbase_user, hbase_user_dirs)
-
-  if params.has_nagios:
-    Group(params.nagios_group)
-    User(params.nagios_user,
-         gid=params.nagios_group)
-
-  if params.has_oozie_server:
-    User(params.oozie_user,
-         gid = params.user_group)
-
-  if params.has_hcat_server_host:
-    User(params.webhcat_user,
-         gid = params.user_group)
-    User(params.hcat_user,
-         gid = params.user_group)
-
-  if params.has_hive_server_host:
-    User(params.hive_user,
-         gid = params.user_group)
-
-  if params.has_resourcemanager:
-    User(params.yarn_user,
-         gid = params.user_group)
-
-  if params.has_ganglia_server:
-    Group(params.gmetad_user)
-    Group(params.gmond_user)
-    User(params.gmond_user,
-         gid=params.user_group,
-        groups=[params.gmond_user])
-    User(params.gmetad_user,
-         gid=params.user_group,
-        groups=[params.gmetad_user])
-
-  User(params.hdfs_user,
-        gid=params.user_group,
-        groups=[params.user_group]
-  )
-  User(params.mapred_user,
-       gid=params.user_group,
-       groups=[params.user_group]
-  )
-  if params.has_zk_host:
-    User(params.zk_user,
-         gid=params.user_group)
-
-  if params.has_storm_server:
-    User(params.storm_user,
-         gid=params.user_group,
-         groups=[params.user_group]
-    )
-
-def set_uid(user, user_dirs):
-  """
-  user_dirs - comma separated directories
-  """
-  File("/tmp/changeUid.sh",
-       content=StaticFile("changeToSecureUid.sh"),
-       mode=0555)
-  Execute(format("/tmp/changeUid.sh {user} {user_dirs} 2>/dev/null"),
-          not_if = format("test $(id -u {user}) -gt 1000"))
-
-def install_packages():
-  Package("unzip")
-  Package("net-snmp")
-  Package("net-snmp-utils")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/task-log4j.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/task-log4j.properties b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/task-log4j.properties
deleted file mode 100644
index c8939fc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/files/task-log4j.properties
+++ /dev/null
@@ -1,132 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollver at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-# Rolling File Appender
-#
-
-#log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-#log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and and 30-day backups
-#log4j.appender.RFA.MaxFileSize=1MB
-#log4j.appender.RFA.MaxBackupIndex=30
-
-#log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/hook.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/hook.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/hook.py
deleted file mode 100644
index e11bfac..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/hook.py
+++ /dev/null
@@ -1,37 +0,0 @@
-##!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from shared_initialization import *
-
-#TODO this must be "CONFIGURE" hook when CONFIGURE command will be implemented
-class BeforeConfigureHook(Hook):
-
-  def hook(self, env):
-    import params
-
-    env.set_params(params)
-    setup_java()
-    setup_hadoop()
-    setup_configs()
-
-if __name__ == "__main__":
-  BeforeConfigureHook().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
deleted file mode 100644
index 712a5ab..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/params.py
+++ /dev/null
@@ -1,172 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-import os
-
-config = Script.get_config()
-
-#java params
-artifact_dir = "/tmp/HDP-artifacts/"
-jdk_name = default("/hostLevelParams/jdk_name", None) # None when jdk is already installed by user
-jce_policy_zip = default("/hostLevelParams/jce_name", None) # None when jdk is already installed by user
-jce_location = config['hostLevelParams']['jdk_location']
-jdk_location = config['hostLevelParams']['jdk_location']
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.keytab']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['fs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#users and groups
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = config['configurations']['global']['hdfs_user']
-yarn_user = config['configurations']['global']['yarn_user']
-
-user_group = config['configurations']['global']['user_group']
-mapred_tt_group = default("/configurations/mapred-site/mapreduce.tasktracker.group", user_group)
-
-#snmp
-snmp_conf_dir = "/etc/snmp/"
-snmp_source = "0.0.0.0/0"
-snmp_community = "hadoop"
-
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-zk_hosts = default("/clusterHostInfo/zookeeper_hosts", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-
-has_resourcemanager = not len(rm_host) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_zk_host = not len(zk_hosts) == 0
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-#hadoop params
-hadoop_tmp_dir = format("/tmp/hadoop-{hdfs_user}")
-hadoop_lib_home = "/usr/lib/hadoop/lib"
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hadoop_home = "/usr"
-hadoop_bin = "/usr/lib/hadoop/sbin"
-
-task_log4j_properties_location = os.path.join(hadoop_conf_dir, "task-log4j.properties")
-limits_conf_dir = "/etc/security/limits.d"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-hbase_tmp_dir = config['configurations']['hbase-site']['hbase.tmp.dir']
-#db params
-server_db_name = config['hostLevelParams']['db_name']
-db_driver_filename = config['hostLevelParams']['db_driver_filename']
-oracle_driver_url = config['hostLevelParams']['oracle_jdbc_url']
-mysql_driver_url = config['hostLevelParams']['mysql_jdbc_url']
-
-ambari_db_rca_url = config['hostLevelParams']['ambari_db_rca_url'][0]
-ambari_db_rca_driver = config['hostLevelParams']['ambari_db_rca_driver'][0]
-ambari_db_rca_username = config['hostLevelParams']['ambari_db_rca_username'][0]
-ambari_db_rca_password = config['hostLevelParams']['ambari_db_rca_password'][0]
-
-rca_enabled = config['configurations']['global']['rca_enabled']
-rca_disabled_prefix = "###"
-if rca_enabled == True:
-  rca_prefix = ""
-else:
-  rca_prefix = rca_disabled_prefix
-
-#hadoop-env.sh
-java_home = config['hostLevelParams']['java_home']
-if System.get_instance().platform == "suse":
-  jsvc_path = "/usr/lib/bigtop-utils"
-else:
-  jsvc_path = "/usr/libexec/bigtop-utils"
-
-hadoop_heapsize = config['configurations']['global']['hadoop_heapsize']
-namenode_heapsize = config['configurations']['global']['namenode_heapsize']
-namenode_opt_newsize =  config['configurations']['global']['namenode_opt_newsize']
-namenode_opt_maxnewsize =  config['configurations']['global']['namenode_opt_maxnewsize']
-
-jtnode_opt_newsize = default("jtnode_opt_newsize","200m")
-jtnode_opt_maxnewsize = default("jtnode_opt_maxnewsize","200m")
-jtnode_heapsize =  default("jtnode_heapsize","1024m")
-ttnode_heapsize = "1024m"
-
-dtnode_heapsize = config['configurations']['global']['dtnode_heapsize']
-mapred_pid_dir_prefix = default("mapred_pid_dir_prefix","/var/run/hadoop-mapreduce")
-mapreduce_libs_path = "/usr/lib/hadoop-mapreduce/*"
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-mapred_log_dir_prefix = default("mapred_log_dir_prefix","/var/log/hadoop-mapreduce")
-
-#taskcontroller.cfg
-
-mapred_local_dir = "/tmp/hadoop-mapred/mapred/local"
-
-#log4j.properties
-
-yarn_log_dir_prefix = default("yarn_log_dir_prefix","/var/log/hadoop-yarn")
-
-#hdfs ha properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namenode_ids.split(","))
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-
-if dfs_ha_enabled:
-  namenode_id = None
-  for nn_id in dfs_ha_namenode_ids.split(","):
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-
-dfs_hosts = default('/configurations/hdfs-site/dfs.hosts', None)
\ No newline at end of file
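
params.py leans heavily on the default(path, fallback) helper for optional keys in the command JSON. A rough sketch of the fallback behaviour it relies on (the real helper lives in resource_management and may differ in detail):

# Minimal sketch of a default()-style lookup over the command JSON.
def default(path, fallback, config=None):
    node = config or {}
    for segment in path.strip("/").split("/"):
        if isinstance(node, dict) and segment in node:
            node = node[segment]
        else:
            return fallback
    return node

command_json = {"clusterHostInfo": {"slave_hosts": ["slave1", "slave2"]}}
print(default("/clusterHostInfo/slave_hosts", [], command_json))  # ['slave1', 'slave2']
print(default("/clusterHostInfo/rm_host", [], command_json))      # []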

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
deleted file mode 100644
index f2644aa..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/scripts/shared_initialization.py
+++ /dev/null
@@ -1,327 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import os
-
-from resource_management import *
-
-def setup_java():
-  """
-  Installs the JDK using params that come from ambari-server
-  """
-  import params
-
-  jdk_curl_target = format("{artifact_dir}/{jdk_name}")
-  java_dir = os.path.dirname(params.java_home)
-  java_exec = format("{java_home}/bin/java")
-  
-  if not params.jdk_name:
-    return
-  
-  Execute(format("mkdir -p {artifact_dir} ; curl -kf --retry 10 {jdk_location}/{jdk_name} -o {jdk_curl_target}"),
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}"))
-
-  if params.jdk_name.endswith(".bin"):
-    install_cmd = format("mkdir -p {java_dir} ; chmod +x {jdk_curl_target}; cd {java_dir} ; echo A | {jdk_curl_target} -noregister > /dev/null 2>&1")
-  elif params.jdk_name.endswith(".gz"):
-    install_cmd = format("mkdir -p {java_dir} ; cd {java_dir} ; tar -xf {jdk_curl_target} > /dev/null 2>&1")
-  
-  Execute(install_cmd,
-          path = ["/bin","/usr/bin/"],
-          not_if = format("test -e {java_exec}")
-  )
-  jce_curl_target = format("{artifact_dir}/{jce_policy_zip}")
-  download_jce = format("mkdir -p {artifact_dir}; curl -kf --retry 10 {jce_location}/{jce_policy_zip} -o {jce_curl_target}")
-  Execute( download_jce,
-        path = ["/bin","/usr/bin/"],
-        not_if =format("test -e {jce_curl_target}"),
-        ignore_failures = True
-  )
-  
-  if params.security_enabled:
-    security_dir = format("{java_home}/jre/lib/security")
-    extract_cmd = format("rm -f local_policy.jar; rm -f US_export_policy.jar; unzip -o -j -q {jce_curl_target}")
-    Execute(extract_cmd,
-          only_if = format("test -e {security_dir} && test -f {jce_curl_target}"),
-          cwd  = security_dir,
-          path = ['/bin/','/usr/bin']
-    )
-
-def setup_hadoop():
-  """
-  Setup hadoop files and directories
-  """
-  import params
-
-  File(os.path.join(params.snmp_conf_dir, 'snmpd.conf'),
-       content=Template("snmpd.conf.j2"))
-  Service("snmpd",
-          action = "restart")
-
-  Execute("/bin/echo 0 > /selinux/enforce",
-          only_if="test -f /selinux/enforce"
-  )
-
-  install_snappy()
-
-  #directories
-  Directory(params.hadoop_conf_dir,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hdfs_log_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  Directory(params.hadoop_pid_dir_prefix,
-            recursive=True,
-            owner='root',
-            group='root'
-  )
-  #this isn't needed with stack 1
-  Directory(os.path.dirname(params.hadoop_tmp_dir),
-            recursive=True,
-            owner=params.hdfs_user,
-            )
-  #files
-  File(os.path.join(params.limits_conf_dir, 'hdfs.conf'),
-       owner='root',
-       group='root',
-       mode=0644,
-       content=Template("hdfs.conf.j2")
-  )
-  if params.security_enabled:
-    File(os.path.join(params.hadoop_bin, "task-controller"),
-         owner="root",
-         group=params.mapred_tt_group,
-         mode=06050
-    )
-    tc_mode = 0644
-    tc_owner = "root"
-  else:
-    tc_mode = None
-    tc_owner = params.hdfs_user
-
-  if tc_mode:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner = tc_owner,
-         mode = tc_mode,
-         group = params.mapred_tt_group,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  else:
-    File(os.path.join(params.hadoop_conf_dir, 'taskcontroller.cfg'),
-         owner=tc_owner,
-         content=Template("taskcontroller.cfg.j2")
-    )
-  for file in ['hadoop-env.sh', 'commons-logging.properties', 'slaves']:
-    File(os.path.join(params.hadoop_conf_dir, file),
-         owner=tc_owner,
-         content=Template(file + ".j2")
-    )
-
-  health_check_template = "health_check-v2" #for stack 1 use 'health_check'
-  File(os.path.join(params.hadoop_conf_dir, "health_check"),
-       owner=tc_owner,
-       content=Template(health_check_template + ".j2")
-  )
-
-  File(os.path.join(params.hadoop_conf_dir, "log4j.properties"),
-       owner=params.hdfs_user,
-       content=Template("log4j.properties.j2")
-  )
-
-  update_log4j_props(os.path.join(params.hadoop_conf_dir, "log4j.properties"))
-
-  File(os.path.join(params.hadoop_conf_dir, "hadoop-metrics2.properties"),
-       owner=params.hdfs_user,
-       content=Template("hadoop-metrics2.properties.j2")
-  )
-
-  db_driver_dload_cmd = ""
-  if params.server_db_name == 'oracle' and params.oracle_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {oracle_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-  elif params.server_db_name == 'mysql' and params.mysql_driver_url != "":
-    db_driver_dload_cmd = format(
-      "curl -kf --retry 5 {mysql_driver_url} -o {hadoop_lib_home}/{db_driver_filename}")
-
-  if db_driver_dload_cmd:
-    Execute(db_driver_dload_cmd,
-            not_if =format("test -e {hadoop_lib_home}/{db_driver_filename}")
-    )
-
-
-def setup_configs():
-  """
-  Creates configs for the HDFS and MapReduce services
-  """
-  import params
-
-  if "mapred-queue-acls" in params.config['configurations']:
-    XmlConfig("mapred-queue-acls.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'mapred-queue-acls'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-  elif os.path.exists(
-      os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml")):
-    File(os.path.join(params.hadoop_conf_dir, "mapred-queue-acls.xml"),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  if "hadoop-policy" in params.config['configurations']:
-    XmlConfig("hadoop-policy.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['hadoop-policy'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  if "mapred-site" in params.config['configurations']:
-    XmlConfig("mapred-site.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations']['mapred-site'],
-              owner=params.mapred_user,
-              group=params.user_group
-    )
-
-  File(params.task_log4j_properties_location,
-       content=StaticFile("task-log4j.properties"),
-       mode=0755
-  )
-
-  if "capacity-scheduler" in params.config['configurations']:
-    XmlConfig("capacity-scheduler.xml",
-              conf_dir=params.hadoop_conf_dir,
-              configurations=params.config['configurations'][
-                'capacity-scheduler'],
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-
-  XmlConfig("hdfs-site.xml",
-            conf_dir=params.hadoop_conf_dir,
-            configurations=params.config['configurations']['hdfs-site'],
-            owner=params.hdfs_user,
-            group=params.user_group
-  )
-
-  # if params.stack_version[0] == "1":
-  #   Link('/usr/lib/hadoop/hadoop-tools.jar',
-  #         to = '/usr/lib/hadoop/lib/hadoop-tools.jar',
-  #         mode = 0755
-  #   )
-
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'configuration.xsl')):
-    File(os.path.join(params.hadoop_conf_dir, 'configuration.xsl'),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml')):
-    File(os.path.join(params.hadoop_conf_dir, 'fair-scheduler.xml'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(os.path.join(params.hadoop_conf_dir, 'masters')):
-    File(os.path.join(params.hadoop_conf_dir, 'masters'),
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-client.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-  if os.path.exists(
-      os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example')):
-    File(os.path.join(params.hadoop_conf_dir, 'ssl-server.xml.example'),
-         owner=params.mapred_user,
-         group=params.user_group
-    )
-
-  generate_include_file()
-
-def update_log4j_props(file):
-  import params
-
-  property_map = {
-    'ambari.jobhistory.database': params.ambari_db_rca_url,
-    'ambari.jobhistory.driver': params.ambari_db_rca_driver,
-    'ambari.jobhistory.user': params.ambari_db_rca_username,
-    'ambari.jobhistory.password': params.ambari_db_rca_password,
-    'ambari.jobhistory.logger': 'DEBUG,JHA',
-
-    'log4j.appender.JHA': 'org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender',
-    'log4j.appender.JHA.database': '${ambari.jobhistory.database}',
-    'log4j.appender.JHA.driver': '${ambari.jobhistory.driver}',
-    'log4j.appender.JHA.user': '${ambari.jobhistory.user}',
-    'log4j.appender.JHA.password': '${ambari.jobhistory.password}',
-
-    'log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': '${ambari.jobhistory.logger}',
-    'log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger': 'true'
-  }
-  for key in property_map:
-    value = property_map[key]
-    Execute(format(
-      "sed -i 's~\\({rca_disabled_prefix}\\)\\?{key}=.*~{rca_prefix}{key}={value}~' {file}"))
-
-
-def generate_include_file():
-  import params
-
-  if params.dfs_hosts and params.has_slaves:
-    include_hosts_list = params.slave_hosts
-    File(params.dfs_hosts,
-         content=Template("include_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-
-def install_snappy():
-  import params
-
-  snappy_so = "libsnappy.so"
-  so_target_dir_x86 = format("{hadoop_lib_home}/native/Linux-i386-32")
-  so_target_dir_x64 = format("{hadoop_lib_home}/native/Linux-amd64-64")
-  so_target_x86 = format("{so_target_dir_x86}/{snappy_so}")
-  so_target_x64 = format("{so_target_dir_x64}/{snappy_so}")
-  so_src_dir_x86 = format("{hadoop_home}/lib")
-  so_src_dir_x64 = format("{hadoop_home}/lib64")
-  so_src_x86 = format("{so_src_dir_x86}/{snappy_so}")
-  so_src_x64 = format("{so_src_dir_x64}/{snappy_so}")
-  Execute(
-    format("mkdir -p {so_target_dir_x86}; ln -sf {so_src_x86} {so_target_x86}"))
-  Execute(
-    format("mkdir -p {so_target_dir_x64}; ln -sf {so_src_x64} {so_target_x64}"))

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/commons-logging.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/commons-logging.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/commons-logging.properties.j2
deleted file mode 100644
index 77e458f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/commons-logging.properties.j2
+++ /dev/null
@@ -1,25 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#Logging Implementation
-
-#Log4J
-org.apache.commons.logging.Log=org.apache.commons.logging.impl.Log4JLogger
-
-#JDK Logger
-#org.apache.commons.logging.Log=org.apache.commons.logging.impl.Jdk14Logger

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/exclude_hosts_list.j2
deleted file mode 100644
index bb5795b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-env.sh.j2
deleted file mode 100644
index 7d10cc3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-env.sh.j2
+++ /dev/null
@@ -1,121 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Set Hadoop-specific environment variables here.
-
-# The only required environment variable is JAVA_HOME.  All others are
-# optional.  When running a distributed configuration it is best to
-# set JAVA_HOME in this file, so that it is correctly defined on
-# remote nodes.
-
-# The java implementation to use.  Required.
-export JAVA_HOME={{java_home}}
-export HADOOP_HOME_WARN_SUPPRESS=1
-
-# Hadoop Configuration Directory
-#TODO: if env var set that can cause problems
-export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-{{hadoop_conf_dir}}}
-
-{# this is different for HDP1 #}
-# Path to jsvc required by secure HDP 2.0 datanode
-export JSVC_HOME={{jsvc_path}}
-
-
-# The maximum amount of heap to use, in MB. Default is 1000.
-export HADOOP_HEAPSIZE="{{hadoop_heapsize}}"
-
-export HADOOP_NAMENODE_INIT_HEAPSIZE="-Xms{{namenode_heapsize}}"
-
-# Extra Java runtime options.  Empty by default.
-export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true ${HADOOP_OPTS}"
-
-# Command specific options appended to HADOOP_OPTS when specified
-export HADOOP_NAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xms{{namenode_heapsize}} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_NAMENODE_OPTS}"
-HADOOP_JOBTRACKER_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{jtnode_opt_newsize}} -XX:MaxNewSize={{jtnode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps -Xmx{{jtnode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dhadoop.mapreduce.jobsummary.logger=INFO,JSA ${HADOOP_JOBTRACKER_OPTS}"
-
-HADOOP_TASKTRACKER_OPTS="-server -Xmx{{ttnode_heapsize}} -Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console ${HADOOP_TASKTRACKER_OPTS}"
-HADOOP_DATANODE_OPTS="-Xmx{{dtnode_heapsize}} -Dhadoop.security.logger=ERROR,DRFAS ${HADOOP_DATANODE_OPTS}"
-HADOOP_BALANCER_OPTS="-server -Xmx{{hadoop_heapsize}}m ${HADOOP_BALANCER_OPTS}"
-
-export HADOOP_SECONDARYNAMENODE_OPTS="-server -XX:ParallelGCThreads=8 -XX:+UseConcMarkSweepGC -XX:ErrorFile={{hdfs_log_dir_prefix}}/$USER/hs_err_pid%p.log -XX:NewSize={{namenode_opt_newsize}} -XX:MaxNewSize={{namenode_opt_maxnewsize}} -Xloggc:{{hdfs_log_dir_prefix}}/$USER/gc.log-`date +'%Y%m%d%H%M'` -verbose:gc -XX:+PrintGCDetails -XX:+PrintGCTimeStamps -XX:+PrintGCDateStamps ${HADOOP_NAMENODE_INIT_HEAPSIZE} -Xmx{{namenode_heapsize}} -Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT ${HADOOP_SECONDARYNAMENODE_OPTS}"
-
-# The following applies to multiple commands (fs, dfs, fsck, distcp etc)
-export HADOOP_CLIENT_OPTS="-Xmx${HADOOP_HEAPSIZE}m $HADOOP_CLIENT_OPTS"
-# On secure datanodes, user to run the datanode as after dropping privileges
-export HADOOP_SECURE_DN_USER={{hdfs_user}}
-
-# Extra ssh options.  Empty by default.
-export HADOOP_SSH_OPTS="-o ConnectTimeout=5 -o SendEnv=HADOOP_CONF_DIR"
-
-# Where log files are stored.  $HADOOP_HOME/logs by default.
-export HADOOP_LOG_DIR={{hdfs_log_dir_prefix}}/$USER
-
-# History server logs
-export HADOOP_MAPRED_LOG_DIR={{mapred_log_dir_prefix}}/$USER
-
-# Where log files are stored in the secure data environment.
-export HADOOP_SECURE_DN_LOG_DIR={{hdfs_log_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# File naming remote slave hosts.  $HADOOP_HOME/conf/slaves by default.
-# export HADOOP_SLAVES=${HADOOP_HOME}/conf/slaves
-
-# host:path where hadoop code should be rsync'd from.  Unset by default.
-# export HADOOP_MASTER=master:/home/$USER/src/hadoop
-
-# Seconds to sleep between slave commands.  Unset by default.  This
-# can be useful in large clusters, where, e.g., slave rsyncs can
-# otherwise arrive faster than the master can service them.
-# export HADOOP_SLAVE_SLEEP=0.1
-
-# The directory where pid files are stored. /tmp by default.
-export HADOOP_PID_DIR={{hadoop_pid_dir_prefix}}/$USER
-export HADOOP_SECURE_DN_PID_DIR={{hadoop_pid_dir_prefix}}/$HADOOP_SECURE_DN_USER
-
-# History server pid
-export HADOOP_MAPRED_PID_DIR={{mapred_pid_dir_prefix}}/$USER
-
-YARN_RESOURCEMANAGER_OPTS="-Dyarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY"
-
-# A string representing this instance of hadoop. $USER by default.
-export HADOOP_IDENT_STRING=$USER
-
-# The scheduling priority for daemon processes.  See 'man nice'.
-
-# export HADOOP_NICENESS=10
-
-# Use libraries from standard classpath
-JAVA_JDBC_LIBS=""
-#Add libraries required by mysql connector
-for jarFile in `ls /usr/share/java/*mysql* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by oracle connector
-for jarFile in `ls /usr/share/java/*ojdbc* 2>/dev/null`
-do
-  JAVA_JDBC_LIBS=${JAVA_JDBC_LIBS}:$jarFile
-done
-#Add libraries required by nodemanager
-MAPREDUCE_LIBS={{mapreduce_libs_path}}
-export HADOOP_CLASSPATH=${HADOOP_CLASSPATH}${JAVA_JDBC_LIBS}:${MAPREDUCE_LIBS}
-
-# Setting path to hdfs command line
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-
-#Mostly required for hadoop 2.0
-export JAVA_LIBRARY_PATH=${JAVA_LIBRARY_PATH}:/usr/lib/hadoop/lib/native/Linux-amd64-64
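
The JDBC loop near the end of the template just appends any mysql/ojdbc jars found under /usr/share/java to HADOOP_CLASSPATH. An equivalent check in Python, useful for verifying what the rendered script will pick up (sketch only):

# Equivalent of the JDBC jar discovery loop, as a quick local check.
import glob

java_jdbc_libs = ""
for pattern in ("/usr/share/java/*mysql*", "/usr/share/java/*ojdbc*"):
    for jar in sorted(glob.glob(pattern)):
        java_jdbc_libs += ":" + jar
print("JAVA_JDBC_LIBS=%s" % java_jdbc_libs)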

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-metrics2.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-metrics2.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-metrics2.properties.j2
deleted file mode 100644
index a6a66ef..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hadoop-metrics2.properties.j2
+++ /dev/null
@@ -1,45 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements. See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License. You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# syntax: [prefix].[source|sink|jmx].[instance].[options]
-# See package.html for org.apache.hadoop.metrics2 for details
-
-{% if has_ganglia_server %}
-*.period=60
-
-*.sink.ganglia.class=org.apache.hadoop.metrics2.sink.ganglia.GangliaSink31
-*.sink.ganglia.period=10
-
-# default for supportsparse is false
-*.sink.ganglia.supportsparse=true
-
-.sink.ganglia.slope=jvm.metrics.gcCount=zero,jvm.metrics.memHeapUsedM=both
-.sink.ganglia.dmax=jvm.metrics.threadsBlocked=70,jvm.metrics.memHeapUsedM=40
-
-# Hook up to the server
-namenode.sink.ganglia.servers={{ganglia_server_host}}:8661
-datanode.sink.ganglia.servers={{ganglia_server_host}}:8659
-jobtracker.sink.ganglia.servers={{ganglia_server_host}}:8662
-tasktracker.sink.ganglia.servers={{ganglia_server_host}}:8658
-maptask.sink.ganglia.servers={{ganglia_server_host}}:8660
-reducetask.sink.ganglia.servers={{ganglia_server_host}}:8660
-resourcemanager.sink.ganglia.servers={{ganglia_server_host}}:8664
-nodemanager.sink.ganglia.servers={{ganglia_server_host}}:8657
-historyserver.sink.ganglia.servers={{ganglia_server_host}}:8666
-journalnode.sink.ganglia.servers={{ganglia_server_host}}:8654
-
-resourcemanager.sink.ganglia.tagsForPrefix.yarn=Queue
-
-{% endif %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hdfs.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hdfs.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hdfs.conf.j2
deleted file mode 100644
index ca7baa2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/hdfs.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{hdfs_user}}   - nofile 32768
-{{hdfs_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check-v2.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check-v2.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check-v2.j2
deleted file mode 100644
index cb7b12b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check-v2.j2
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" -a "$m" != "/mnt" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks ; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0
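
check_disks above flags ext3 filesystems that are listed in /etc/fstab but are unmounted or mounted read-only according to /proc/mounts. Roughly the same logic in Python, kept only as an illustration of what the health check reports:

# Rough Python equivalent of check_disks; illustration only.
def check_disks(fstab="/etc/fstab", mounts="/proc/mounts"):
    declared = []                       # ext3 mount points listed in fstab
    with open(fstab) as f:
        for line in f:
            fields = line.split()
            if len(fields) >= 3 and not line.startswith("#") and "ext3" in fields[2]:
                declared.append(fields[1])
    mounted = {}                        # mount point -> options from /proc/mounts
    with open(mounts) as f:
        for line in f:
            fields = line.split()
            if len(fields) >= 4:
                mounted[fields[1]] = fields[3]
    problems = []
    for mnt in declared:
        if mnt not in mounted and mnt != "/mnt":
            problems.append("%s(u)" % mnt)        # declared but not mounted
        elif mnt in mounted and mounted[mnt].startswith("ro,"):
            problems.append("%s(ro)" % mnt)       # mounted read-only
    return "disks ok" if not problems else " ".join(problems)

print(check_disks())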

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check.j2
deleted file mode 100644
index b84b336..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/health_check.j2
+++ /dev/null
@@ -1,118 +0,0 @@
-#!/bin/bash
-#
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-err=0;
-
-function check_disks {
-
-  for m in `awk '$3~/ext3/ {printf" %s ",$2}' /etc/fstab` ; do
-    fsdev=""
-    fsdev=`awk -v m=$m '$2==m {print $1}' /proc/mounts`;
-    if [ -z "$fsdev" ] ; then
-      msg_="$msg_ $m(u)"
-    else
-      msg_="$msg_`awk -v m=$m '$2==m { if ( $4 ~ /^ro,/ ) {printf"%s(ro)",$2 } ; }' /proc/mounts`"
-    fi
-  done
-
-  if [ -z "$msg_" ] ; then
-    echo "disks ok" ; exit 0
-  else
-    echo "$msg_" ; exit 2
-  fi
-
-}
-
-function check_taskcontroller {
-  if [ "<%=scope.function_hdp_template_var("::hdp::params::security_enabled")%>" == "true" ]; then
-    perm=`stat -c %a:%U:%G <%=scope.function_hdp_template_var("task_bin_exe")%> 2>/dev/null`
-    if [ $? -eq 0 ] && [ "$perm" == "6050:root:hadoop" ] ; then
-      echo "taskcontroller ok"
-    else
-      echo 'check taskcontroller' ; exit 1
-    fi
-  fi
-}
-
-function check_jetty {
-  hname=`hostname`
-  jmx=`curl -s -S -m 5 "http://$hname:<%=scope.function_hdp_template_var("::hdp::tasktracker_port")%>/jmx?qry=Hadoop:service=TaskTracker,name=ShuffleServerMetrics" 2>/dev/null` ;
-  if [ $? -eq 0 ] ; then
-    e=`echo $jmx | awk '/shuffle_exceptions_caught/ {printf"%d",$2}'` ;
-    e=${e:-0} # no jmx servlet ?
-    if [ $e -gt 10 ] ; then
-      echo "check jetty: shuffle_exceptions=$e" ; exit 1
-    else
-      echo "jetty ok"
-    fi
-  else
-    echo "check jetty: ping failed" ; exit 1
-  fi
-}
-
-function check_link {
-  snmp=/usr/bin/snmpwalk
-  if [ -e $snmp ] ; then
-    $snmp -t 5 -Oe  -Oq  -Os -v 1 -c public localhost if | \
-    awk ' {
-      split($1,a,".") ;
-      if ( a[1] == "ifIndex" ) { ifIndex[a[2]] = $2 }
-      if ( a[1] == "ifDescr" ) { ifDescr[a[2]] = $2 }
-      if ( a[1] == "ifType" ) { ifType[a[2]] = $2 }
-      if ( a[1] == "ifSpeed" ) { ifSpeed[a[2]] = $2 }
-      if ( a[1] == "ifAdminStatus" ) { ifAdminStatus[a[2]] = $2 }
-      if ( a[1] == "ifOperStatus" ) { ifOperStatus[a[2]] = $2 }
-    }
-    END {
-      up=0;
-      for (i in ifIndex ) {
-      if ( ifType[i] == 6 && ifAdminStatus[i] == 1 && ifOperStatus[i] == 1 && ifSpeed[i] == 1000000000 ) {
-      up=i;
-      }
-      }
-      if ( up == 0 ) { print "check link" ; exit 2 }
-      else { print ifDescr[up],"ok" }
-    }'
-    exit $? ;
-  fi
-}
-
-# Run all checks
-# Disabled 'check_link' for now... 
-for check in disks taskcontroller jetty; do
-  msg=`check_${check}` ;
-  if [ $? -eq 0 ] ; then
-    ok_msg="$ok_msg$msg,"
-  else
-    err_msg="$err_msg$msg,"
-  fi
-done
-
-if [ ! -z "$err_msg" ] ; then
-  echo -n "ERROR $err_msg "
-fi
-if [ ! -z "$ok_msg" ] ; then
-  echo -n "OK: $ok_msg"
-fi
-
-echo
-
-# Success!
-exit 0

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/include_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/include_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/include_hosts_list.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/include_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}
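
include_hosts_list.j2 (and the identical slaves.j2 below) is just a loop over slave_hosts. Rendering it outside Ambari is a one-liner, assuming the jinja2 package is installed; the host names are example values:

# Rendering the template outside Ambari (requires the jinja2 package).
from jinja2 import Template

tmpl = Template("{% for host in slave_hosts %}{{host}}\n{% endfor %}")
print(tmpl.render(slave_hosts=["slave1.example.com", "slave2.example.com"]))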

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/log4j.properties.j2
deleted file mode 100644
index 6c02292..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/log4j.properties.j2
+++ /dev/null
@@ -1,218 +0,0 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# Define some default values that can be overridden by system properties
-hadoop.root.logger=INFO,console
-hadoop.log.dir=.
-hadoop.log.file=hadoop.log
-
-
-# Define the root logger to the system property "hadoop.root.logger".
-log4j.rootLogger=${hadoop.root.logger}, EventCounter
-
-# Logging Threshold
-log4j.threshhold=ALL
-
-#
-# Daily Rolling File Appender
-#
-
-log4j.appender.DRFA=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Rollover at midnight
-log4j.appender.DRFA.DatePattern=.yyyy-MM-dd
-
-# 30-day backup
-#log4j.appender.DRFA.MaxBackupIndex=30
-log4j.appender.DRFA.layout=org.apache.log4j.PatternLayout
-
-# Pattern format: Date LogLevel LoggerName LogMessage
-log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-# Debugging Pattern format
-#log4j.appender.DRFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-#
-# console
-# Add "console" to rootlogger above if you want to use this 
-#
-
-log4j.appender.console=org.apache.log4j.ConsoleAppender
-log4j.appender.console.target=System.err
-log4j.appender.console.layout=org.apache.log4j.PatternLayout
-log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-
-#
-# TaskLog Appender
-#
-
-#Default values
-hadoop.tasklog.taskid=null
-hadoop.tasklog.iscleanup=false
-hadoop.tasklog.noKeepSplits=4
-hadoop.tasklog.totalLogFileSize=100
-hadoop.tasklog.purgeLogSplits=true
-hadoop.tasklog.logsRetainHours=12
-
-log4j.appender.TLA=org.apache.hadoop.mapred.TaskLogAppender
-log4j.appender.TLA.taskId=${hadoop.tasklog.taskid}
-log4j.appender.TLA.isCleanup=${hadoop.tasklog.iscleanup}
-log4j.appender.TLA.totalLogFileSize=${hadoop.tasklog.totalLogFileSize}
-
-log4j.appender.TLA.layout=org.apache.log4j.PatternLayout
-log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-
-#
-#Security audit appender
-#
-hadoop.security.logger=INFO,console
-hadoop.security.log.maxfilesize=256MB
-hadoop.security.log.maxbackupindex=20
-log4j.category.SecurityLogger=${hadoop.security.logger}
-hadoop.security.log.file=SecurityAuth.audit
-log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
-log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-log4j.appender.RFAS=org.apache.log4j.RollingFileAppender 
-log4j.appender.RFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
-log4j.appender.RFAS.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.RFAS.MaxFileSize=${hadoop.security.log.maxfilesize}
-log4j.appender.RFAS.MaxBackupIndex=${hadoop.security.log.maxbackupindex}
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# Rolling File Appender
-#
-
-log4j.appender.RFA=org.apache.log4j.RollingFileAppender
-log4j.appender.RFA.File=${hadoop.log.dir}/${hadoop.log.file}
-
-# Logfile size and 30-day backups
-log4j.appender.RFA.MaxFileSize=256MB
-log4j.appender.RFA.MaxBackupIndex=10
-
-log4j.appender.RFA.layout=org.apache.log4j.PatternLayout
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
-log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
-
-
-# Custom Logging levels
-
-hadoop.metrics.log.level=INFO
-#log4j.logger.org.apache.hadoop.mapred.JobTracker=DEBUG
-#log4j.logger.org.apache.hadoop.mapred.TaskTracker=DEBUG
-#log4j.logger.org.apache.hadoop.fs.FSNamesystem=DEBUG
-log4j.logger.org.apache.hadoop.metrics2=${hadoop.metrics.log.level}
-
-# Jets3t library
-log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
-
-#
-# Null Appender
-# Trap security logger on the hadoop client side
-#
-log4j.appender.NullAppender=org.apache.log4j.varia.NullAppender
-
-#
-# Event Counter Appender
-# Sends counts of logging messages at different severity levels to Hadoop Metrics.
-#
-log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter
-
-{% if is_jtnode_master or is_rmnode_master %}
-#
-# Job Summary Appender 
-#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
-log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
-# Set the ResourceManager summary log filename
-yarn.server.resourcemanager.appsummary.log.file=hadoop-mapreduce.jobsummary.log
-# Set the ResourceManager summary log level and appender
-yarn.server.resourcemanager.appsummary.logger=${hadoop.root.logger}
-#yarn.server.resourcemanager.appsummary.logger=INFO,RMSUMMARY
-
-# To enable AppSummaryLogging for the RM,
-# set yarn.server.resourcemanager.appsummary.logger to
-# <LEVEL>,RMSUMMARY in hadoop-env.sh
-
-# Appender for ResourceManager Application Summary Log
-# Requires the following properties to be set
-#    - hadoop.log.dir (Hadoop Log directory)
-#    - yarn.server.resourcemanager.appsummary.log.file (resource manager app summary log filename)
-#    - yarn.server.resourcemanager.appsummary.logger (resource manager app summary log level and appender)
-log4j.appender.RMSUMMARY=org.apache.log4j.RollingFileAppender
-log4j.appender.RMSUMMARY.File={{yarn_log_dir_prefix}}/{{yarn_user}}/${yarn.server.resourcemanager.appsummary.log.file}
-log4j.appender.RMSUMMARY.MaxFileSize=256MB
-log4j.appender.RMSUMMARY.MaxBackupIndex=20
-log4j.appender.RMSUMMARY.layout=org.apache.log4j.PatternLayout
-log4j.appender.RMSUMMARY.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.appender.JSA.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{2}: %m%n
-log4j.appender.JSA.DatePattern=.yyyy-MM-dd
-log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
-log4j.logger.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=${yarn.server.resourcemanager.appsummary.logger}
-log4j.additivity.org.apache.hadoop.yarn.server.resourcemanager.RMAppManager$ApplicationSummary=false
-{% endif %}
-
-{{rca_prefix}}ambari.jobhistory.database={{ambari_db_rca_url}}
-{{rca_prefix}}ambari.jobhistory.driver={{ambari_db_rca_driver}}
-{{rca_prefix}}ambari.jobhistory.user={{ambari_db_rca_username}}
-{{rca_prefix}}ambari.jobhistory.password={{ambari_db_rca_password}}
-{{rca_prefix}}ambari.jobhistory.logger=DEBUG,JHA
-
-{{rca_prefix}}log4j.appender.JHA=org.apache.ambari.log4j.hadoop.mapreduce.jobhistory.JobHistoryAppender
-{{rca_prefix}}log4j.appender.JHA.database=${ambari.jobhistory.database}
-{{rca_prefix}}log4j.appender.JHA.driver=${ambari.jobhistory.driver}
-{{rca_prefix}}log4j.appender.JHA.user=${ambari.jobhistory.user}
-{{rca_prefix}}log4j.appender.JHA.password=${ambari.jobhistory.password}
-
-{{rca_prefix}}log4j.logger.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=${ambari.jobhistory.logger}
-{{rca_prefix}}log4j.additivity.org.apache.hadoop.mapred.JobHistory$JobHistoryLogger=true
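
The trailing {{rca_prefix}} block ties back to rca_enabled in params.py: when RCA is disabled the properties are rendered behind the "###" prefix, i.e. commented out. A tiny illustration (values are made up):

# How rca_prefix comments the properties out when RCA is disabled.
rca_enabled = False                    # comes from the 'global' config in params.py
rca_disabled_prefix = "###"
rca_prefix = "" if rca_enabled else rca_disabled_prefix
print("%sambari.jobhistory.logger=DEBUG,JHA" % rca_prefix)  # ###ambari.jobhistory.logger=DEBUG,JHA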

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/slaves.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/slaves.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/slaves.j2
deleted file mode 100644
index cbcf6c3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/slaves.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in slave_hosts %}
-{{host}}
-{% endfor %}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/snmpd.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/snmpd.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/snmpd.conf.j2
deleted file mode 100644
index 3530444..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/snmpd.conf.j2
+++ /dev/null
@@ -1,48 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-com2sec notConfigUser  {{snmp_source}}   {{snmp_community}}
-group   notConfigGroup v1           notConfigUser
-group   notConfigGroup v2c           notConfigUser
-view    systemview    included   .1
-access  notConfigGroup ""      any       noauth    exact  systemview none none
-
-syslocation Hadoop 
-syscontact HadoopMaster 
-dontLogTCPWrappersConnects yes
-
-###############################################################################
-# disk checks
-
-disk / 10000
-
-
-###############################################################################
-# load average checks
-#
-
-# load [1MAX=12.0] [5MAX=12.0] [15MAX=12.0]
-#
-# 1MAX:   If the 1 minute load average is above this limit at query
-#         time, the errorFlag will be set.
-# 5MAX:   Similar, but for 5 min average.
-# 15MAX:  Similar, but for 15 min average.
-
-# Check for loads:
-#load 12 14 14
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/taskcontroller.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/taskcontroller.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/taskcontroller.cfg.j2
deleted file mode 100644
index d01d37e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/hooks/before-START/templates/taskcontroller.cfg.j2
+++ /dev/null
@@ -1,20 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-mapred.local.dir={{mapred_local_dir}}
-mapreduce.tasktracker.group={{mapred_tt_group}}
-hadoop.log.dir={{hdfs_log_dir_prefix}}/{{mapred_user}}
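
taskcontroller.cfg is paired with the task-controller binary that setup_hadoop() above chowns to root with mode 06050, and health_check.j2 checks for exactly the string "6050:root:hadoop". A small sketch for checking the permissions by hand; the path comes from hadoop_bin in params.py and the output depends on the host:

# Checking the task-controller permissions the way health_check does.
import grp
import os
import pwd
import stat

def describe(path):
    st = os.stat(path)
    mode = stat.S_IMODE(st.st_mode)              # expect 0o6050 (setuid/setgid)
    owner = pwd.getpwuid(st.st_uid).pw_name
    group = grp.getgrgid(st.st_gid).gr_name
    return "%o:%s:%s" % (mode, owner, group)

path = "/usr/lib/hadoop/sbin/task-controller"
if os.path.exists(path):
    print(describe(path))                        # e.g. 6050:root:hadoop
else:
    print("%s not present on this host" % path)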

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/metainfo.xml
deleted file mode 100644
index ca45822..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/metainfo.xml
+++ /dev/null
@@ -1,22 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-    <versions>
-	  <active>true</active>
-    </versions>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/repos/repoinfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/repos/repoinfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/repos/repoinfo.xml
deleted file mode 100644
index 97e14bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/repos/repoinfo.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<reposinfo>
-  <os type="centos6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="centos5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="redhat5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux6">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos6/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="oraclelinux5">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/centos5/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="suse11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-  <os type="sles11">
-    <repo>
-      <baseurl>http://public-repo-1.hortonworks.com/HDP/suse11/2.x/updates/2.0.6.0</baseurl>
-      <repoid>HDP-2.0.8</repoid>
-      <reponame>HDP</reponame>
-    </repo>
-  </os>
-</reposinfo>

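Since this commit renames the 2.0.8 stack definition to 2.1.1, the repository layout deleted above presumably reappears under stacks/HDP/2.1.1/repos/repoinfo.xml with the stack-version references updated. A minimal sketch of that substitution follows, under the assumption (not shown in this part of the diff) that only the HDP-2.0.8 repoid changes while the 2.0.6.0 upstream baseurls stay as they are:

# Illustrative sketch only: the kind of stack-version rewrite this rename implies
# for repoinfo.xml entries. The commit itself moves files in the source tree; the
# new repoid value under 2.1.1 is an assumption here.
OLD_STACK, NEW_STACK = "2.0.8", "2.1.1"

def rename_stack_refs(line: str) -> str:
    # Replace the stack-version reference; the 2.0.6.0 baseurl is a separate
    # repository version and is deliberately left untouched.
    return line.replace("HDP-" + OLD_STACK, "HDP-" + NEW_STACK)

print(rename_stack_refs("      <repoid>HDP-2.0.8</repoid>"))
# -> "      <repoid>HDP-2.1.1</repoid>"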

[24/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json
deleted file mode 100644
index f33a0c0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/metrics.json
+++ /dev/null
@@ -1,7800 +0,0 @@
-{
-  "NAMENODE": {
-    "Component": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM":{
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.HostName",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getFileInfo_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginSuccess_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Safemode": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Safemode",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesCreated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DecomNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DecomNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Free": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Free",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Total": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.GetBlockLocations",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.fsync_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryMax": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FileInfoOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemState.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.PendingDeletionBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthenticationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getEditLogSize_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapCommittedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.FilesInGetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsRunnable",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.complete_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/LiveNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.LiveNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsNew",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationFailures",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Syncs_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTime_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReceived_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/DeadNodes": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.DeadNodes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.SentBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/HeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.FilesTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogWarn",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ExcessBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcCount",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.ReceivedBytes",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.blockReport_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/NonHeapMemoryMax": {
-            "metric": "java.lang:type=Memory.NonHeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollFsImage_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.NumOpenConnections",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.ScheduledReplicationBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsWaiting",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.BlocksTotal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.GcTimeMillis",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getBlockLocations_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.create_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityTotal": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Total",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsTerminated",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityRemainingGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.Transactions_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.MissingBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Threads": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Threads",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.callQueueLen",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CorruptBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.blockReport_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogInfo",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NameDirStatuses": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NameDirStatuses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.rollEditLog_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.addBlock_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcProcessingTime_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/CapacityUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.UnderReplicatedBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.sendHeartbeat_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNode.CreateFileOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logError": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogError",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=ugi.loginFailure_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getProtocolVersion_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.rpcAuthorizationSuccesses",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "ServiceComponentInfo/Version": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Version",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.getListing_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.LogFatal",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/NonDfsUsedSpace": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.NonDfsUsedSpace",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_avg_time",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalBlocks": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalBlocks",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityNonDFSUsed": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystemMetrics.CapacityNonDFSUsed",
-            "pointInTime": true,
-            "temporal": false
-          }
-        }
-      }
-    ],
-    "HostComponent": [
-      {
-        "type": "ganglia",
-        "metrics": {
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "dfs.FSNamesystem.TotalLoad",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "dfs.FSNamesystem.BlockCapacity",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "dfs.namenode.GetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesAppended": {
-            "metric": "dfs.namenode.FilesAppended",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/load/load_one": {
-            "metric": "load_one",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "dfs.FSNamesystem.CapacityTotalGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_run": {
-            "metric": "proc_run",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "dfs.FSNamesystem.CapacityUsedGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "dfs.namenode.AddBlockOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/swap_total": {
-            "metric": "swap_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesDeleted": {
-            "metric": "dfs.namenode.FilesDeleted",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "dfs.namenode.SyncsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "jvm.JvmMetrics.ThreadsBlocked",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "rpc.rpc.RpcQueueTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/process/proc_total": {
-            "metric": "proc_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "dfs.namenode.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/part_max_used": {
-            "metric": "part_max_used",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetFileInfoNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginSuccess_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginSuccessNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_idle": {
-            "metric": "cpu_idle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_aidle": {
-            "metric": "cpu_aidle",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_free": {
-            "metric": "mem_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/versionRequest_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesCreated": {
-            "metric": "dfs.namenode.FilesCreated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenameAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/bytes_in": {
-            "metric": "bytes_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/network/pkts_out": {
-            "metric": "pkts_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/memNonHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemNonHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_cached": {
-            "metric": "mem_cached",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/disk/disk_total": {
-            "metric": "disk_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setPermission_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetPermissionAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesRenamed": {
-            "metric": "dfs.namenode.FilesRenamed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/JournalTransactionsBatchedInSync": {
-            "metric": "dfs.namenode.JournalTransactionsBatchedInSync",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_num_ops": {
-            "metric": "ugi.UgiMetrics.LoginFailureNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/GetBlockLocations": {
-            "metric": "dfs.namenode.GetBlockLocations",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/fsync_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.FsyncNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_wio": {
-            "metric": "cpu_wio",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.CreateAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.PendingReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_speed": {
-            "metric": "cpu_speed",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FileInfoOps": {
-            "metric": "dfs.namenode.FileInfoOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/DeleteFileOps": {
-            "metric": "dfs.namenode.DeleteFileOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_avg_time": {
-            "metric": "rpc.rpc.RpcProcessingTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setSafeMode_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetSafeModeAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthenticationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/PendingDeletionBlocks": {
-            "metric": "dfs.FSNamesystem.PendingDeletionBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthenticationFailures": {
-            "metric": "rpc.rpc.RpcAuthenticationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/pkts_in": {
-            "metric": "pkts_in",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/mem_total": {
-            "metric": "mem_total",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getEditLogSize_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetEditLogManifestNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapCommittedM": {
-            "metric": "jvm.JvmMetrics.MemHeapCommittedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/FilesInGetListingOps": {
-            "metric": "dfs.namenode.FilesInGetListingOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsRunnable": {
-            "metric": "jvm.JvmMetrics.ThreadsRunnable",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/complete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CompleteNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsNew": {
-            "metric": "jvm.JvmMetrics.ThreadsNew",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_num_ops",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationFailures": {
-            "metric": "rpc.rpc.RpcAuthorizationFailures",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Syncs_num_ops": {
-            "metric": "dfs.namenode.SyncsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/RpcQueueTime_avg_time": {
-            "metric": "rpc.rpc.RpcQueueTimeAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReceived_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReceivedAndDeletedNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setReplication_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.setReplication_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/SentBytes": {
-            "metric": "rpc.rpc.SentBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/FilesTotal": {
-            "metric": "dfs.FSNamesystem.FilesTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logWarn": {
-            "metric": "jvm.JvmMetrics.LogWarn",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ExcessBlocks": {
-            "metric": "dfs.FSNamesystem.ExcessBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTimedWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsTimedWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/gcCount": {
-            "metric": "jvm.JvmMetrics.GcCount",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/ReceivedBytes": {
-            "metric": "rpc.rpc.ReceivedBytes",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_nice": {
-            "metric": "cpu_nice",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/blockReport_num_ops": {
-            "metric": "dfs.namenode.BlockReportNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/SafemodeTime": {
-            "metric": "dfs.namenode.SafemodeTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollFsImage_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.rollFsImage_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/NumOpenConnections": {
-            "metric": "rpc.rpc.NumOpenConnections",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/memHeapUsedM": {
-            "metric": "jvm.JvmMetrics.MemHeapUsedM",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/ScheduledReplicationBlocks": {
-            "metric": "dfs.FSNamesystem.ScheduledReplicationBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsWaiting": {
-            "metric": "jvm.JvmMetrics.ThreadsWaiting",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/disk/disk_free": {
-            "metric": "disk_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/BlocksTotal": {
-            "metric": "dfs.FSNamesystem.BlocksTotal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_buffers": {
-            "metric": "mem_buffers",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/gcTimeMillis": {
-            "metric": "jvm.JvmMetrics.GcTimeMillis",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getBlockLocations_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetBlockLocationsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_num_ops": {
-            "metric": "dfs.namenode.TransactionsNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/create_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.CreateNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/threadsTerminated": {
-            "metric": "jvm.JvmMetrics.ThreadsTerminated",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/network/bytes_out": {
-            "metric": "bytes_out",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_user": {
-            "metric": "cpu_user",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/memory/swap_free": {
-            "metric": "swap_free",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_five": {
-            "metric": "load_five",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_system": {
-            "metric": "cpu_system",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemainingGB": {
-            "metric": "dfs.FSNamesystem.CapacityRemainingGB",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/Transactions_avg_time": {
-            "metric": "dfs.namenode.TransactionsAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/boottime": {
-            "metric": "boottime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/MissingBlocks": {
-            "metric": "dfs.FSNamesystem.MissingBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpc/callQueueLen": {
-            "metric": "rpc.rpc.CallQueueLength",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/delete_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.DeleteNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/CorruptBlocks": {
-            "metric": "dfs.FSNamesystem.CorruptBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rename_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RenameNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blockReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.BlockReportAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/mkdirs_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.MkdirsNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/load/load_fifteen": {
-            "metric": "load_fifteen",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/jvm/logInfo": {
-            "metric": "jvm.JvmMetrics.LogInfo",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/fsImageLoadTime": {
-            "metric": "dfs.namenode.FsImageLoadTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_num_ops",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/rollEditLog_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RollEditLogNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/addBlock_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.AddBlockAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/blocksBeingWrittenReport_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.blocksBeingWrittenReport_avg_time",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/setOwner_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SetOwnerAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/RpcProcessingTime_num_ops": {
-            "metric": "rpc.rpc.RpcProcessingTimeNumOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/memory/mem_shared": {
-            "metric": "mem_shared",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/dfs/FSNamesystem/UnderReplicatedBlocks": {
-            "metric": "dfs.FSNamesystem.UnderReplicatedBlocks",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/sendHeartbeat_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.SendHeartbeatAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/dfs/namenode/CreateFileOps": {
-            "metric": "dfs.namenode.CreateFileOps",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logError": {
-            "metric": "jvm.JvmMetrics.LogError",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/ugi/loginFailure_avg_time": {
-            "metric": "ugi.UgiMetrics.LoginFailureAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/cpu/cpu_num": {
-            "metric": "cpu_num",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getProtocolVersion_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.getProtocolVersion_avg_time",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/register_num_ops": {
-            "metric": "rpcdetailed.rpcdetailed.RegisterDatanodeNumOps",
-            "pointInTime": true,
-            "temporal": true
-          },
-          "metrics/rpc/rpcAuthorizationSuccesses": {
-            "metric": "rpc.rpc.RpcAuthorizationSuccesses",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/getListing_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.GetListingAvgTime",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/jvm/logFatal": {
-            "metric": "jvm.JvmMetrics.LogFatal",
-            "pointInTime": false,
-            "temporal": true
-          },
-          "metrics/rpcdetailed/renewLease_avg_time": {
-            "metric": "rpcdetailed.rpcdetailed.RenewLeaseAvgTime",
-            "pointInTime": true,
-            "temporal": true
-          }
-        }
-      },
-      {
-        "type": "jmx",
-        "metrics": {
-          "metrics/dfs/namenode/Used": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.Used",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/TotalLoad": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.TotalLoad",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memMaxM":{
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemMaxM",
-            "pointInTime" : true,
-            "temporal" : false
-          },
-          "metrics/dfs/FSNamesystem/BlockCapacity": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.BlockCapacity",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/TotalFiles": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.TotalFiles",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/GetListingOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.GetListingOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/HostName": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.tag.Hostname",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/runtime/StartTime": {
-            "metric": "java.lang:type=Runtime.StartTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/UpgradeFinalized": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.UpgradeFinalized",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getProtocolVersion_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.VersionRequestNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/fsync_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.FsyncAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/ugi/loginSuccess_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=UgiMetrics.LoginSuccessAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/renewLease_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.renewLease_num_ops",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityRemaining": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetFileInfoAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentRemaining": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentRemaining",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/memNonHeapUsedM": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.MemNonHeapUsedM",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/complete_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.CompleteAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityTotalGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityTotalGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getBlockLocations_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcDetailedActivity.GetBlockLocationsNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/FSNamesystem/CapacityUsedGB": {
-            "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityUsedGB",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/AddBlockOps": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.AddBlockOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/Syncs_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.SyncsAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/threadsBlocked": {
-            "metric": "Hadoop:service=NameNode,name=JvmMetrics.ThreadsBlocked",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/PercentUsed": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeInfo.PercentUsed",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpc/RpcQueueTime_num_ops": {
-            "metric": "Hadoop:service=NameNode,name=RpcActivity.RpcQueueTimeNumOps",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/dfs/namenode/blockReport_avg_time": {
-            "metric": "Hadoop:service=NameNode,name=NameNodeActivity.BlockReportAvgTime",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryMax": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[max]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/jvm/HeapMemoryUsed": {
-            "metric": "java.lang:type=Memory.HeapMemoryUsage[used]",
-            "pointInTime": true,
-            "temporal": false
-          },
-          "metrics/rpcdetailed/getFileInfo_num_ops": {
-          

<TRUNCATED>
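
The property-to-metric mappings in the deleted metrics.json above pair an Ambari API property path (e.g. "metrics/jvm/memHeapUsedM") with either a Ganglia metric name or a JMX bean attribute, plus flags saying whether point-in-time and temporal (time-series) queries are supported for that source. The Python sketch below shows, under stated assumptions, how such a mapping could be consumed; the sample entries mirror the structure above, but resolve_metric() is a hypothetical helper for illustration only, not Ambari's actual metric provider code.

# Illustrative sketch only: resolve an Ambari property path against a
# metrics.json-style mapping. Entries copy the shape of the file above;
# resolve_metric() is hypothetical, not part of Ambari.
SAMPLE_MAPPINGS = {
    "metrics/jvm/memHeapUsedM": {
        "metric": "jvm.JvmMetrics.MemHeapUsedM",   # Ganglia metric name
        "pointInTime": False,
        "temporal": True,
    },
    "metrics/dfs/FSNamesystem/CapacityRemaining": {
        "metric": "Hadoop:service=NameNode,name=FSNamesystem.CapacityRemaining",  # JMX bean attribute
        "pointInTime": True,
        "temporal": False,
    },
}

def resolve_metric(property_path, temporal):
    """Return the backing metric name if the requested query type is supported."""
    entry = SAMPLE_MAPPINGS.get(property_path)
    if entry is None:
        return None
    supported = entry["temporal"] if temporal else entry["pointInTime"]
    return entry["metric"] if supported else None

if __name__ == "__main__":
    print(resolve_metric("metrics/jvm/memHeapUsedM", temporal=True))
    print(resolve_metric("metrics/dfs/FSNamesystem/CapacityRemaining", temporal=False))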

[27/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/setupGanglia.sh
deleted file mode 100644
index 5145b9c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/setupGanglia.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh
-
-function usage()
-{
-  cat << END_USAGE
-Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
-
-Options:
-  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
-
-  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
-                          Cluster. Without this, we generate slave gmond configuration.
-
-  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
-                          gmond configuration that is generated without this).
-  -o <owner>              Owner
-  -g <group>              Group
-END_USAGE
-}
-
-function instantiateGmetadConf()
-{
-  # gmetad utility library.
-  source ./gmetadLib.sh;
-
-  generateGmetadConf > ${GMETAD_CONF_FILE};
-}
-
-function instantiateGmondConf()
-{
-  # gmond utility library.
-  source ./gmondLib.sh;
- 
-  gmondClusterName=${1};
-
-  if [ "x" != "x${gmondClusterName}" ]
-  then
-
-    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
-    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
-    
-    # Always blindly generate the core gmond config - that goes on every box running gmond. 
-    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
-
-    isMasterGmond=${2};
-
-    # Decide whether we want to add on the master or slave gmond config.
-    if [ "0" -eq "${isMasterGmond}" ]
-    then
-      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
-    else
-      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
-    fi
-
-    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
-
-  else
-    echo "No gmondClusterName passed in, nothing to instantiate";
-  fi
-}
-
-# main()
-
-gmondClusterName=;
-isMasterGmond=0;
-configureGmetad=0;
-owner='root';
-group='root';
-
-while getopts ":c:mto:g:" OPTION
-do
-  case ${OPTION} in
-    c) 
-      gmondClusterName=${OPTARG};
-      ;;
-    m)
-      isMasterGmond=1;
-      ;;
-    t)
-      configureGmetad=1;
-      ;;
-    o)
-      owner=${OPTARG};
-      ;;
-    g)
-      group=${OPTARG};
-      ;;
-    ?)
-      usage;
-      exit 1;
-  esac
-done
-
-# Initialization.
-createDirectory ${GANGLIA_CONF_DIR};
-createDirectory ${GANGLIA_RUNTIME_DIR};
-# So rrdcached can drop its PID files in here.
-chmod a+w ${GANGLIA_RUNTIME_DIR};
-chown ${owner}:${group} ${GANGLIA_CONF_DIR};
-
-if [ -n "${gmondClusterName}" ]
-then
-
-  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
-  if [ "1" -eq "${configureGmetad}" ]
-  then
-    instantiateGmetadConf;
-  else
-    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
-  fi
-
-elif [ "1" -eq "${configureGmetad}" ]
-then
-  instantiateGmetadConf;
-else
-  usage;
-  exit 2;
-fi
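
The decision logic in setupGanglia.sh above is: -t (gmetad configuration) always takes precedence over -c, a -c without -m generates slave rather than master gmond configuration, -o/-g default the owner and group to root, and passing neither -c nor -t prints usage and exits with status 2. The Python sketch below only restates that precedence with the same flag names; it is illustrative and generates no configuration.

# Sketch of setupGanglia.sh's argument precedence (illustrative only).
import argparse

def decide(argv):
    parser = argparse.ArgumentParser(add_help=False)
    parser.add_argument("-c", dest="cluster")                      # gmond cluster name
    parser.add_argument("-m", dest="master", action="store_true")  # master gmond for that cluster
    parser.add_argument("-t", dest="gmetad", action="store_true")  # gmetad configuration
    parser.add_argument("-o", dest="owner", default="root")
    parser.add_argument("-g", dest="group", default="root")
    args = parser.parse_args(argv)

    if args.gmetad:
        # -t always wins, even if -c was also given.
        return "generate gmetad configuration"
    if args.cluster:
        role = "master" if args.master else "slave"
        return "generate %s gmond configuration for cluster %s" % (role, args.cluster)
    return "usage error (exit 2)"

if __name__ == "__main__":
    print(decide(["-c", "HDPNameNode", "-m"]))
    print(decide(["-c", "HDPSlaves", "-t"]))
    print(decide([]))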

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmetad.sh
deleted file mode 100644
index ab5102d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmetad.sh
+++ /dev/null
@@ -1,64 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
-source ./rrdcachedLib.sh;
-
-# Before starting gmetad, start rrdcached.
-./startRrdcached.sh;
-
-if [ $? -eq 0 ] 
-then
-    gmetadRunningPid=`getGmetadRunningPid`;
-
-    # Only attempt to start gmetad if there's not already one running.
-    if [ -z "${gmetadRunningPid}" ]
-    then
-        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
-
-        for i in `seq 0 5`; do
-          gmetadRunningPid=`getGmetadRunningPid`;
-          if [ -n "${gmetadRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-
-        if [ -n "${gmetadRunningPid}" ]
-        then
-            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
-        else
-            echo "Failed to start ${GMETAD_BIN}";
-            exit 1;
-        fi
-    else
-        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
-    fi
-else
-    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
-    exit 2;
-fi
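
startGmetad.sh above follows a pattern repeated throughout these scripts: start rrdcached first, launch the daemon only if no PID is already running, then poll for the PID a few times one second apart before declaring success or failure. The generic Python sketch below shows that start-and-poll pattern under assumptions: get_running_pid() is a placeholder pgrep-based lookup, not the actual Ganglia helper functions.

# Generic start-and-poll pattern (illustrative; not the actual gmetad helpers).
import subprocess
import time

def get_running_pid(pattern):
    """Placeholder PID lookup: first PID matching `pattern` via pgrep, or None."""
    result = subprocess.run(["pgrep", "-f", pattern], capture_output=True, text=True)
    pids = result.stdout.split()
    return int(pids[0]) if pids else None

def start_daemon(command, pattern, retries=5):
    if get_running_pid(pattern):
        return "already running"
    subprocess.run(command)
    for _ in range(retries + 1):          # mirrors the `for i in seq 0 5` loop above
        pid = get_running_pid(pattern)
        if pid:
            return "started with PID %d" % pid
        time.sleep(1)
    raise RuntimeError("failed to start %s" % command[0])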

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmond.sh
deleted file mode 100644
index 239b62e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startGmond.sh
+++ /dev/null
@@ -1,80 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function startGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only attempt to start gmond if there's not already one running.
-    if [ -z "${gmondRunningPid}" ]
-    then
-      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
-
-      if [ -e "${gmondCoreConfFileName}" ]
-      then 
-        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
-
-        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
-
-        for i in `seq 0 5`; do
-          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-          if [ -n "${gmondRunningPid}" ]
-          then
-            break;
-          fi
-          sleep 1;
-        done
-  
-        if [ -n "${gmondRunningPid}" ]
-        then
-            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
-        else
-            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
-            exit 1;
-        fi
-      fi 
-    else
-      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so start 
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        startGmondForCluster ${gmondClusterName};
-    done
-else
-    # Just start the one ${gmondClusterName} that was asked for.
-    startGmondForCluster ${gmondClusterName};
-fi
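
startGmond.sh extends the same start-and-poll idea with per-cluster dispatch: with no argument it starts a gmond for every configured Ganglia cluster, and with an argument it starts only the named one. A small Python sketch of that dispatch follows; the cluster names are hypothetical stand-ins for what getConfiguredGangliaClusterNames would return.

# Per-cluster dispatch sketch (illustrative; cluster names are hypothetical).
def configured_cluster_names():
    # Stand-in for getConfiguredGangliaClusterNames in gangliaLib.sh.
    return ["HDPNameNode", "HDPSlaves", "HDPDataNode"]

def start_gmond_for_cluster(name):
    print("starting gmond for cluster %s" % name)

def start_gmond(cluster_name=None):
    # No argument: start every configured cluster; otherwise just the one asked for.
    targets = [cluster_name] if cluster_name else configured_cluster_names()
    for name in targets:
        start_gmond_for_cluster(name)

if __name__ == "__main__":
    start_gmond()              # all clusters
    start_gmond("HDPSlaves")   # a single cluster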

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startRrdcached.sh
deleted file mode 100644
index e79472b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/startRrdcached.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Slurp in all our user-customizable settings.
-source ./gangliaEnv.sh;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only attempt to start rrdcached if there's not already one running.
-if [ -z "${rrdcachedRunningPid}" ]
-then
-    #changed because problem puppet had with nobody user
-    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-    #         -b /var/lib/ganglia/rrds -B
-    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
-             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
-             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
-             -b ${RRDCACHED_BASE_DIR} -B"
-
-    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for 
-    # this, but it doesn't take sometimes due to a lack of permissions,
-    # so perform the operation explicitly to be super-sure.
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
-    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
-
-    # Check to make sure rrdcached actually started up.
-    for i in `seq 0 5`; do
-      rrdcachedRunningPid=`getRrdcachedRunningPid`;
-      if [ -n "${rrdcachedRunningPid}" ]
-        then
-          break;
-      fi
-      sleep 1;
-    done
-
-    if [ -n "${rrdcachedRunningPid}" ]
-    then
-        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
-    else
-        echo "Failed to start ${RRDCACHED_BIN}";
-        exit 1;
-    fi
-else
-    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
-fi
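
startRrdcached.sh launches rrdcached as the gmetad user and then explicitly chgrp's the two UNIX sockets to the web server group, because rrdcached's own -s option sometimes fails to take effect due to permissions. The Python sketch below restates that sequence; the user, group, and socket paths are assumptions for illustration, not the real values sourced from gangliaEnv.sh and rrdcachedLib.sh.

# Illustrative restatement of the rrdcached launch sequence; paths/users are assumed.
import grp
import os
import subprocess

GMETAD_USER = "nobody"                        # assumption; real value comes from gangliaEnv.sh
WEBSERVER_GROUP = "apache"                    # assumption
SOCKETS = ["/var/run/ganglia/rrdcached.sock",            # assumed socket paths
           "/var/run/ganglia/rrdcached.limited.sock"]

def start_rrdcached(command_line):
    # Run the daemon as the gmetad user (mirrors `su - $GMETAD_USER -c ...`).
    subprocess.run(["su", "-", GMETAD_USER, "-c", command_line], check=True)
    # Hand the sockets to the web server group so the Ganglia web UI can reach them.
    gid = grp.getgrnam(WEBSERVER_GROUP).gr_gid
    for sock in SOCKETS:
        os.chown(sock, -1, gid)               # -1 leaves the owner unchanged (chgrp semantics)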

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmetad.sh
deleted file mode 100644
index 2764e0e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmetad.sh
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./gmetadLib.sh;
-
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${gmetadRunningPid}" ]
-then
-    kill -KILL ${gmetadRunningPid};
-    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
-fi
-
-# Poll again.
-gmetadRunningPid=`getGmetadRunningPid`;
-
-# Once we've killed gmetad, there should no longer be a running PID.
-if [ -z "${gmetadRunningPid}" ]
-then
-    # It's safe to stop rrdcached now.
-    ./stopRrdcached.sh;
-fi
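
The stop side is ordered: stopGmetad.sh kills gmetad first and only stops rrdcached once no gmetad PID can be found, so the cache daemon never disappears out from under a live gmetad. A short Python sketch of that ordering, reusing the placeholder get_running_pid() idea from the start sketch above, is shown below; both callables are assumptions, not the real helpers.

# Ordered shutdown sketch (illustrative): stop gmetad, then rrdcached.
import os
import signal

def stop_gmetad_then_rrdcached(get_running_pid, stop_rrdcached):
    pid = get_running_pid("gmetad")
    if pid:
        os.kill(pid, signal.SIGKILL)          # mirrors `kill -KILL`
    # Re-check before touching rrdcached; only proceed once gmetad is gone.
    if get_running_pid("gmetad") is None:
        stop_rrdcached()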

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmond.sh
deleted file mode 100644
index 1af3eb9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopGmond.sh
+++ /dev/null
@@ -1,54 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
-source ./gmondLib.sh;
-
-function stopGmondForCluster()
-{
-    gmondClusterName=${1};
-
-    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
-
-    # Only go ahead with the termination if we could find a running PID.
-    if [ -n "${gmondRunningPid}" ]
-    then
-      kill -KILL ${gmondRunningPid};
-      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
-    fi
-}
-
-# main()
-gmondClusterName=${1};
-
-if [ "x" == "x${gmondClusterName}" ]
-then
-    # No ${gmondClusterName} passed in as command-line arg, so stop
-    # all the gmonds we know about.
-    for gmondClusterName in `getConfiguredGangliaClusterNames`
-    do
-        stopGmondForCluster ${gmondClusterName};
-    done
-else
-    stopGmondForCluster ${gmondClusterName};
-fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopRrdcached.sh
deleted file mode 100644
index 0a0d8d8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/stopRrdcached.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get all our common constants etc. set up.
-source ./rrdcachedLib.sh;
-
-rrdcachedRunningPid=`getRrdcachedRunningPid`;
-
-# Only go ahead with the termination if we could find a running PID.
-if [ -n "${rrdcachedRunningPid}" ]
-then
-    kill -TERM ${rrdcachedRunningPid};
-    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
-    # until we're sure it's well and truly dead. 
-    #
-    # Without this, an immediately following startRrdcached.sh won't do
-    # anything, because it still sees this soon-to-die instance alive,
-    # and the net result is that after a few seconds, there's no
-    # ${RRDCACHED_BIN} running on the box anymore.
-    sleep 5;
-    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
-fi 
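
Unlike the gmetad and gmond stop scripts, stopRrdcached.sh sends SIGTERM rather than SIGKILL and then sleeps a few seconds so rrdcached can drain its buffers; without the wait, an immediately following startRrdcached.sh would see the dying instance, skip the start, and leave no rrdcached running. A hedged Python sketch of that graceful-stop-with-drain pattern, again with a placeholder PID lookup:

# Graceful stop with a drain delay (illustrative sketch).
import os
import signal
import time

def stop_rrdcached(get_running_pid, drain_seconds=5):
    pid = get_running_pid("rrdcached")
    if pid is None:
        return
    os.kill(pid, signal.SIGTERM)     # TERM, not KILL, so buffers flush to the RRDs
    time.sleep(drain_seconds)        # wait until it is truly gone before any restart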

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/teardownGanglia.sh
deleted file mode 100644
index b27f7a2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/files/teardownGanglia.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-# Get access to Ganglia-wide constants, utilities etc.
-source ./gangliaLib.sh;
-
-# Undo what we did while setting up Ganglia on this box.
-rm -rf ${GANGLIA_CONF_DIR};
-rm -rf ${GANGLIA_RUNTIME_DIR};
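
teardownGanglia.sh simply removes the Ganglia configuration and runtime directories created during setup. Expressed with the resource_management Directory resource, the same action="delete" usage that appears later in ganglia_server.py, the cleanup would look roughly like this; an illustrative sketch only, not code from this commit:

from resource_management import *

def teardown_ganglia(conf_dir, runtime_dir):
  # Mirror the two rm -rf calls above with Directory resources.
  Directory(conf_dir, action="delete")
  Directory(runtime_dir, action="delete")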

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia.py
deleted file mode 100644
index 75626b1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia.py
+++ /dev/null
@@ -1,97 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-import os
-
-
-def groups_and_users():
-  import params
-
-def config():
-  import params
-
-  shell_cmds_dir = params.ganglia_shell_cmds_dir
-  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
-                 'gmondLib.sh', 'rrdcachedLib.sh',
-                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
-                 'startRrdcached.sh', 'stopGmetad.sh',
-                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
-  Directory(shell_cmds_dir,
-            owner="root",
-            group="root",
-            recursive=True
-  )
-  init_file("gmetad")
-  init_file("gmond")
-  for sh_file in shell_files:
-    shell_file(sh_file)
-  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
-    ganglia_TemplateConfig(conf_file)
-
-
-def init_file(name):
-  import params
-
-  File("/etc/init.d/hdp-" + name,
-       content=StaticFile(name + ".init"),
-       mode=0755
-  )
-
-
-def shell_file(name):
-  import params
-
-  File(params.ganglia_shell_cmds_dir + os.sep + name,
-       content=StaticFile(name),
-       mode=0755
-  )
-
-
-def ganglia_TemplateConfig(name, mode=755, tag=None):
-  import params
-
-  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
-                 owner="root",
-                 group="root",
-                 template_tag=tag,
-                 mode=mode
-  )
-
-
-def generate_daemon(ganglia_service,
-                    name=None,
-                    role=None,
-                    owner=None,
-                    group=None):
-  import params
-
-  cmd = ""
-  if ganglia_service == "gmond":
-    if role == "server":
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
-    else:
-      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
-  elif ganglia_service == "gmetad":
-    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
-  else:
-    raise Fail("Unexpected ganglia service")
-  Execute(format(cmd),
-          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
-                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
-  )
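
ganglia.py bundles the helpers the monitor and server scripts call: config() lays down the shell command directory, the hdp-gmetad/hdp-gmond init scripts and the templated conf files, and generate_daemon() assembles the setupGanglia.sh invocation for a gmond cluster or the gmetad instance. A sketch of how a caller would use these helpers, mirroring ganglia_monitor.py in the next file; the cluster name here is illustrative:

import ganglia
from ganglia import generate_daemon

def configure_monitor(user_group):
  ganglia.groups_and_users()
  ganglia.config()                      # shell scripts, init files, templates
  generate_daemon("gmond",
                  name="HDPDataNode",   # illustrative cluster name
                  role="monitor",
                  owner="root",
                  group=user_group)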

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
deleted file mode 100644
index 6ae004b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor.py
+++ /dev/null
@@ -1,176 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_monitor_service
-
-
-class GangliaMonitor(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    ganglia_monitor_service.monitor("start")
-
-  def stop(self, env):
-    ganglia_monitor_service.monitor("stop")
-
-
-  def status(self, env):
-    import status_params
-    pid_file_name = 'gmond.pid'
-    pid_file_count = 0
-    pid_dir = status_params.pid_dir
-    # Recursively check all existing gmond pid files
-    for cur_dir, subdirs, files in os.walk(pid_dir):
-      for file_name in files:
-        if file_name == pid_file_name:
-          pid_file = os.path.join(cur_dir, file_name)
-          check_process_status(pid_file)
-          pid_file_count += 1
-    if pid_file_count == 0: # If no pid file is present
-      raise ComponentIsNotRunning()
-
-
-  def config(self, env):
-    import params
-
-    ganglia.groups_and_users()
-
-    Directory(params.ganglia_conf_dir,
-              owner="root",
-              group=params.user_group,
-              recursive=True
-    )
-
-    ganglia.config()
-
-    if params.is_namenode_master:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_jtnode_master:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_rmnode_master:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hsnode_master:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hbase_master:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_nodemanager:
-      generate_daemon("gmond",
-                      name = "HDPNodeManager",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hsnode_master:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_slave:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.is_jn_host:
-      generate_daemon("gmond",
-                      name = "HDPJournalNode",
-                      role = "monitor",
-                      owner = "root",
-                      group = params.user_group)
-
-    Directory(path.join(params.ganglia_dir, "conf.d"),
-              owner="root",
-              group=params.user_group
-    )
-
-    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
-         owner="root",
-         group=params.user_group
-    )
-    File(path.join(params.ganglia_dir, "gmond.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-if __name__ == "__main__":
-  GangliaMonitor().execute()
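
GangliaMonitor.status() walks the runtime directory for every gmond.pid file, checks each one, and reports the component as not running only when none is found. The same discovery step in plain standard-library Python, independent of the resource_management helpers; the example path is an assumption:

import os

def find_pid_files(pid_dir, pid_file_name="gmond.pid"):
  """Return the path of every matching pid file under pid_dir."""
  matches = []
  for cur_dir, _subdirs, files in os.walk(pid_dir):
    for file_name in files:
      if file_name == pid_file_name:
        matches.append(os.path.join(cur_dir, file_name))
  return matches

# e.g. found = find_pid_files("/var/run/ganglia/hdp")  # path is illustrative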

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor_service.py
deleted file mode 100644
index d86d894..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_monitor_service.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def monitor(action=None):# 'start' or 'stop'
-  if action == "start":
-    Execute("chkconfig gmond off",
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-    )
-  Execute(
-    format(
-      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
-    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
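
monitor() first takes gmond out of chkconfig so the OS init system does not also manage it, then drives the hdp-gmond init script and appends the process listing to /tmp/gmond.log. A plain-subprocess stand-in for that Execute() call, shown only to illustrate the pattern; the helper name and arguments are assumptions:

import subprocess

def run_service(name, action, log_path):
  # Run the init script and append both output streams to the log.
  cmd = "service %s %s >> %s 2>&1" % (name, action, log_path)
  return subprocess.call(cmd, shell=True)

# run_service("hdp-gmond", "start", "/tmp/gmond.log")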

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server.py
deleted file mode 100644
index ab730de..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server.py
+++ /dev/null
@@ -1,197 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-import sys
-import os
-from os import path
-from resource_management import *
-from ganglia import generate_daemon
-import ganglia
-import ganglia_server_service
-
-
-class GangliaServer(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    ganglia_server_service.server("stop")
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    pid_file = format("{pid_dir}/gmetad.pid")
-    # Recursively check all existing gmetad pid files
-    check_process_status(pid_file)
-
-  def config(self, env):
-    import params
-
-    ganglia.groups_and_users()
-    ganglia.config()
-
-    if params.has_namenodes:
-      generate_daemon("gmond",
-                      name = "HDPNameNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_jobtracker:
-      generate_daemon("gmond",
-                      name = "HDPJobTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_masters:
-      generate_daemon("gmond",
-                      name = "HDPHBaseMaster",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_resourcemanager:
-      generate_daemon("gmond",
-                      name = "HDPResourceManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_nodemanager:
-      generate_daemon("gmond",
-                      name = "HDPNodeManager",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_historyserver:
-      generate_daemon("gmond",
-                      name = "HDPHistoryServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_slaves:
-      generate_daemon("gmond",
-                      name = "HDPDataNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_tasktracker:
-      generate_daemon("gmond",
-                      name = "HDPTaskTracker",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_hbase_rs:
-      generate_daemon("gmond",
-                      name = "HDPHBaseRegionServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_flume:
-      generate_daemon("gmond",
-                      name = "HDPFlumeServer",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    if params.has_journalnode:
-      generate_daemon("gmond",
-                      name = "HDPJournalNode",
-                      role = "server",
-                      owner = "root",
-                      group = params.user_group)
-
-    generate_daemon("gmetad",
-                    name = "gmetad",
-                    role = "server",
-                    owner = "root",
-                    group = params.user_group)
-
-    change_permission()
-    server_files()
-    File(path.join(params.ganglia_dir, "gmetad.conf"),
-         owner="root",
-         group=params.user_group
-    )
-
-
-def change_permission():
-  import params
-
-  Directory('/var/lib/ganglia/dwoo',
-            mode=0777,
-            owner=params.gmetad_user,
-            recursive=True
-  )
-
-
-def server_files():
-  import params
-
-  rrd_py_path = params.rrd_py_path
-  Directory(rrd_py_path,
-            recursive=True
-  )
-  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
-  File(rrd_py_file_path,
-       content=StaticFile("rrd.py"),
-       mode=0755
-  )
-  rrd_file_owner = params.gmetad_user
-  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
-    Directory(params.rrdcached_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              mode=0755,
-              recursive=True
-    )
-    Directory(params.rrdcached_default_base_dir,
-              action = "delete"
-    )
-    Link(params.rrdcached_default_base_dir,
-         to=params.rrdcached_base_dir
-    )
-  elif rrd_file_owner != 'nobody':
-    Directory(params.rrdcached_default_base_dir,
-              owner=rrd_file_owner,
-              group=rrd_file_owner,
-              recursive=True
-    )
-
-
-if __name__ == "__main__":
-  GangliaServer().execute()
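
server_files() above handles the case where the operator points rrdcached at a non-default base directory: it creates the new directory owned by the gmetad user, deletes the stock /var/lib/ganglia/rrds, and leaves a symlink in its place so tools that assume the default path keep working. A plain-Python sketch of that relocation logic, with the ownership handling from the original omitted; the example paths are assumptions:

import os
import shutil

def relocate_rrd_base(default_dir, new_dir):
  """Move the rrdcached base dir and keep the default path as a symlink (sketch)."""
  if os.path.realpath(default_dir) == os.path.realpath(new_dir):
    return                                   # already in the requested place
  if not os.path.isdir(new_dir):
    os.makedirs(new_dir)
  if os.path.isdir(default_dir) and not os.path.islink(default_dir):
    shutil.rmtree(default_dir)               # drop the stock directory
  if not os.path.islink(default_dir):
    os.symlink(new_dir, default_dir)         # default path now points at new_dir

# relocate_rrd_base("/var/lib/ganglia/rrds", "/data/ganglia/rrds")  # example paths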

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server_service.py
deleted file mode 100644
index b93e3f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/ganglia_server_service.py
+++ /dev/null
@@ -1,27 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-
-
-def server(action=None):# 'start' or 'stop'
-  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
-  Execute(format(command),
-          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
-  )
-  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/params.py
deleted file mode 100644
index 32a7e4b..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/params.py
+++ /dev/null
@@ -1,80 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-from resource_management import *
-from resource_management.core.system import System
-
-config = Script.get_config()
-
-user_group = config['configurations']['global']["user_group"]
-ganglia_conf_dir = "/etc/ganglia/hdp"
-ganglia_dir = "/etc/ganglia"
-ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
-ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
-
-gmetad_user = config['configurations']['global']["gmetad_user"]
-gmond_user = config['configurations']['global']["gmond_user"]
-
-webserver_group = "apache"
-rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
-rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
-
-ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
-
-hostname = config["hostname"]
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-rm_host = default("/clusterHostInfo/rm_host", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-# datanodes are marked as slave_hosts
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
-nm_hosts = default("/clusterHostInfo/nm_hosts", [])
-hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
-flume_hosts = default("/clusterHostInfo/flume_hosts", [])
-jn_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-is_tasktracker = hostname in tt_hosts
-is_nodemanager = hostname in nm_hosts
-is_hbase_rs = hostname in hbase_rs_hosts
-is_flume = hostname in flume_hosts
-is_jn_host = hostname in jn_hosts
-
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_historyserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_tasktracker = not len(tt_hosts) == 0
-has_nodemanager = not len(nm_hosts) == 0
-has_hbase_rs = not len(hbase_rs_hosts) == 0
-has_flume = not len(flume_hosts) == 0
-has_journalnode = not len(jn_hosts) == 0
-
-if System.get_instance().platform == "suse":
-  rrd_py_path = '/srv/www/cgi-bin'
-else:
-  rrd_py_path = '/var/www/cgi-bin'
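
params.py follows the usual Ambari pattern: read the command JSON once through Script.get_config(), pull host lists with default() so missing keys fall back to empty lists, and reduce them to per-host booleans (is this host a member of the role) and cluster-wide booleans (does any host run the role). A condensed restatement of that reduction in plain Python; the dictionary shape mirrors the clusterHostInfo keys used above and the sample values are illustrative:

def derive_flags(config, hostname):
  """Compute membership flags the way params.py does (illustrative sketch)."""
  def hosts(key):
    # default("/clusterHostInfo/<key>", []) equivalent: missing key -> empty list
    return config.get("clusterHostInfo", {}).get(key, [])

  namenode_host = hosts("namenode_host")
  slave_hosts   = hosts("slave_hosts")

  return {
    "is_namenode_master": hostname in namenode_host,
    "is_slave":           hostname in slave_hosts,
    "has_namenodes":      len(namenode_host) > 0,
    "has_slaves":         len(slave_hosts) > 0,
  }

# flags = derive_flags({"clusterHostInfo": {"namenode_host": ["c6401"]}}, "c6401")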

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/status_params.py
deleted file mode 100644
index 3ccad2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/scripts/status_params.py
+++ /dev/null
@@ -1,25 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaClusters.conf.j2
deleted file mode 100644
index f3bb355..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaClusters.conf.j2
+++ /dev/null
@@ -1,35 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-#########################################################
-### ClusterName           GmondMasterHost   GmondPort ###
-#########################################################
-
-    HDPJournalNode      {{ganglia_server_host}}   8654
-    HDPFlumeServer      {{ganglia_server_host}}   8655
-    HDPHBaseRegionServer       	{{ganglia_server_host}}   8656
-    HDPNodeManager     	{{ganglia_server_host}}   8657
-    HDPTaskTracker     	{{ganglia_server_host}}   8658
-    HDPDataNode       	{{ganglia_server_host}}   8659
-    HDPSlaves       	{{ganglia_server_host}}   8660
-    HDPNameNode         {{ganglia_server_host}}   8661
-    HDPJobTracker     	{{ganglia_server_host}}   8662
-    HDPHBaseMaster      {{ganglia_server_host}}   8663
-    HDPResourceManager  {{ganglia_server_host}}   8664
-    HDPHistoryServer    {{ganglia_server_host}}   8666
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaEnv.sh.j2
deleted file mode 100644
index 1ead550..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaEnv.sh.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-# Unix users and groups for the binaries we start up.
-GMETAD_USER={{gmetad_user}};
-GMOND_USER={{gmond_user}};
-WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaLib.sh.j2
deleted file mode 100644
index 4b5bdd1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/GANGLIA/package/templates/gangliaLib.sh.j2
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-cd `dirname ${0}`;
-
-GANGLIA_CONF_DIR={{ganglia_conf_dir}};
-GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
-RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
-
-# This file contains all the info about each Ganglia Cluster in our Grid.
-GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
-
-function createDirectory()
-{
-    directoryPath=${1};
-
-    if [ "x" != "x${directoryPath}" ]
-    then
-        mkdir -p ${directoryPath};
-    fi
-}
-
-function getGangliaClusterInfo()
-{
-    clusterName=${1};
-
-    if [ "x" != "x${clusterName}" ]
-    then
-        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    else
-        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
-        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
-    fi
-}
-
-function getConfiguredGangliaClusterNames()
-{
-  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
-  # the subdirectory name from each.
-  if [ -e ${GANGLIA_CONF_DIR} ]
-  then  
-    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
-  fi
-}
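
getGangliaClusterInfo() above uses awk to pull a single cluster's row out of gangliaClusters.conf, skipping comment and blank lines, or to dump every row when no name is given. An equivalent lookup in Python, for illustration only; the file format is the three-column table shown in gangliaClusters.conf.j2 earlier:

def get_ganglia_cluster_info(conf_path, cluster_name=None):
  """Return [name, host, port] rows from gangliaClusters.conf (sketch)."""
  rows = []
  with open(conf_path) as conf:
    for line in conf:
      fields = line.split()
      if not fields or fields[0].startswith("#"):
        continue                    # skip blank and comment lines
      if cluster_name is None or fields[0] == cluster_name:
        rows.append(fields)
  return rows

# get_ganglia_cluster_info("./gangliaClusters.conf", "HDPNameNode")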

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/global.xml
deleted file mode 100644
index b2c57bd..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/global.xml
+++ /dev/null
@@ -1,160 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>hbasemaster_host</name>
-    <value></value>
-    <description>HBase Master Host.</description>
-  </property>
-  <property>
-    <name>regionserver_hosts</name>
-    <value></value>
-    <description>Region Server Hosts</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_pid_dir</name>
-    <value>/var/run/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_log_dir</name>
-    <value>/var/log/hbase</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_regionserver_heapsize</name>
-    <value>1024</value>
-    <description>Log Directories for HBase.</description>
-  </property>
-  <property>
-    <name>hbase_master_heapsize</name>
-    <value>1024</value>
-    <description>HBase Master Heap Size</description>
-  </property>
-  <property>
-    <name>hstore_compactionthreshold</name>
-    <value>3</value>
-    <description>HBase HStore compaction threshold.</description>
-  </property>
-  <property>
-    <name>hfile_blockcache_size</name>
-    <value>0.40</value>
-    <description>HFile block cache size.</description>
-  </property>
-  <property>
-    <name>hstorefile_maxsize</name>
-    <value>10737418240</value>
-    <description>Maximum HStoreFile Size</description>
-  </property>
-    <property>
-    <name>regionserver_handlers</name>
-    <value>60</value>
-    <description>HBase RegionServer Handler</description>
-  </property>
-    <property>
-    <name>hregion_majorcompaction</name>
-    <value>604800000</value>
-    <description>The time between major compactions of all HStoreFiles in a region. Set to 0 to disable automated major compactions.</description>
-  </property>
-    <property>
-    <name>hregion_blockmultiplier</name>
-    <value>2</value>
-    <description>HBase Region Block Multiplier</description>
-  </property>
-    <property>
-    <name>hregion_memstoreflushsize</name>
-    <value></value>
-    <description>HBase Region MemStore Flush Size.</description>
-  </property>
-    <property>
-    <name>client_scannercaching</name>
-    <value>100</value>
-    <description>Base Client Scanner Caching</description>
-  </property>
-    <property>
-    <name>zookeeper_sessiontimeout</name>
-    <value>30000</value>
-    <description>ZooKeeper Session Timeout</description>
-  </property>
-    <property>
-    <name>hfile_max_keyvalue_size</name>
-    <value>10485760</value>
-    <description>HBase Client Maximum key-value Size</description>
-  </property>
-  <property>
-    <name>hbase_hdfs_root_dir</name>
-    <value>/apps/hbase/data</value>
-    <description>HBase Relative Path to HDFS.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>Config Directory for HBase.</description>
-  </property>
-   <property>
-    <name>hdfs_enable_shortcircuit_read</name>
-    <value>true</value>
-    <description>HDFS Short Circuit Read</description>
-  </property>
-   <property>
-    <name>hdfs_support_append</name>
-    <value>true</value>
-    <description>HDFS append support</description>
-  </property>
-   <property>
-    <name>hstore_blockingstorefiles</name>
-    <value>10</value>
-    <description>HStore blocking storefiles.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lab</name>
-    <value>true</value>
-    <description>Region Server memstore.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_lowerlimit</name>
-    <value>0.38</value>
-    <description>Region Server memstore lower limit.</description>
-  </property>
-   <property>
-    <name>regionserver_memstore_upperlimit</name>
-    <value>0.4</value>
-    <description>Region Server memstore upper limit.</description>
-  </property>
-   <property>
-    <name>hbase_conf_dir</name>
-    <value>/etc/hbase</value>
-    <description>HBase conf dir.</description>
-  </property>
-   <property>
-    <name>hbase_user</name>
-    <value>hbase</value>
-    <description>HBase User Name.</description>
-  </property>
-
-</configuration>
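
The global.xml properties above reach the service's Python command scripts through the same config dictionary seen in the Ganglia params.py earlier. A short sketch of that access pattern for a few of the properties defined above; the HBASE params module itself is not part of this excerpt:

from resource_management import *

config = Script.get_config()

hbase_user    = config['configurations']['global']['hbase_user']
hbase_log_dir = config['configurations']['global']['hbase_log_dir']
hbase_pid_dir = config['configurations']['global']['hbase_pid_dir']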

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-policy.xml
deleted file mode 100644
index e45f23c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-policy.xml
+++ /dev/null
@@ -1,53 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>security.client.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HRegionInterface protocol implementations (ie. 
-    clients talking to HRegionServers)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.admin.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterInterface protocol implementation (ie. 
-    clients talking to HMaster for admin operations).
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-
-  <property>
-    <name>security.masterregion.protocol.acl</name>
-    <value>*</value>
-    <description>ACL for HMasterRegionInterface protocol implementations
-    (for HRegionServers communicating with HMaster)
-    The ACL is a comma-separated list of user and group names. The user and 
-    group list is separated by a blank. For e.g. "alice,bob users,wheel". 
-    A special value of "*" means all users are allowed.</description>
-  </property>
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml
deleted file mode 100644
index bf4af7d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/configuration/hbase-site.xml
+++ /dev/null
@@ -1,356 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-<configuration>
-  <property>
-    <name>hbase.rootdir</name>
-    <value>hdfs://localhost:8020/apps/hbase/data</value>
-    <description>The directory shared by region servers and into
-    which HBase persists.  The URL should be 'fully-qualified'
-    to include the filesystem scheme.  For example, to specify the
-    HDFS directory '/hbase' where the HDFS instance's namenode is
-    running at namenode.example.org on port 9000, set this value to:
-    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
-    into /tmp.  Change this configuration else all data will be lost
-    on machine restart.
-    </description>
-  </property>
-  <property>
-    <name>hbase.cluster.distributed</name>
-    <value>true</value>
-    <description>The mode the cluster will be in. Possible values are
-      false for standalone mode and true for distributed mode.  If
-      false, startup will run all HBase and ZooKeeper daemons together
-      in the one JVM.
-    </description>
-  </property>
-  <property>
-    <name>hbase.tmp.dir</name>
-    <value>/hadoop/hbase</value>
-    <description>Temporary directory on the local filesystem.
-    Change this setting to point to a location more permanent
-    than '/tmp' (The '/tmp' directory is often cleared on
-    machine restart).
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.bindAddress</name>
-    <value></value>
-    <description>The bind address for the HBase Master web UI
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.info.port</name>
-    <value></value>
-    <description>The port for the HBase Master web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.info.port</name>
-    <value></value>
-    <description>The port for the HBase RegionServer web UI.</description>
-  </property>
-  <property>
-    <name>hbase.regionserver.global.memstore.upperLimit</name>
-    <value>0.4</value>
-    <description>Maximum size of all memstores in a region server before new
-      updates are blocked and flushes are forced. Defaults to 40% of heap
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.handler.count</name>
-    <value>60</value>
-    <description>Count of RPC Listener instances spun up on RegionServers.
-    Same property is used by the Master for count of master handlers.
-    Default is 10.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.majorcompaction</name>
-    <value>86400000</value>
-    <description>The time (in milliseconds) between 'major' compactions of all
-    HStoreFiles in a region.  Default: 1 day.
-    Set to 0 to disable automated major compactions.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.regionserver.global.memstore.lowerLimit</name>
-    <value>0.38</value>
-    <description>When memstores are being forced to flush to make room in
-      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
-      This value equal to hbase.regionserver.global.memstore.upperLimit causes
-      the minimum possible flushing to occur when updates are blocked due to
-      memstore limiting.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.block.multiplier</name>
-    <value>2</value>
-    <description>Block updates if memstore has hbase.hregion.memstore.block.multiplier
-    time hbase.hregion.flush.size bytes.  Useful preventing
-    runaway memstore during spikes in update traffic.  Without an
-    upper-bound, memstore fills such that when it flushes the
-    resultant flush files take a long time to compact or split, or
-    worse, we OOME
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.flush.size</name>
-    <value>134217728</value>
-    <description>
-    Memstore will be flushed to disk if size of the memstore
-    exceeds this number of bytes.  Value is checked by a thread that runs
-    every hbase.server.thread.wakefrequency.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.memstore.mslab.enabled</name>
-    <value>true</value>
-    <description>
-      Enables the MemStore-Local Allocation Buffer,
-      a feature which works to prevent heap fragmentation under
-      heavy write loads. This can reduce the frequency of stop-the-world
-      GC pauses on large heaps.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hregion.max.filesize</name>
-    <value>10737418240</value>
-    <description>
-    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
-    grown to exceed this value, the hosting HRegion is split in two.
-    Default: 1G.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.scanner.caching</name>
-    <value>100</value>
-    <description>Number of rows that will be fetched when calling next
-    on a scanner if it is not served from (local, client) memory. Higher
-    caching values will enable faster scanners but will eat up more memory
-    and some calls of next may take longer and longer times when the cache is empty.
-    Do not set this value such that the time between invocations is greater
-    than the scanner timeout; i.e. hbase.regionserver.lease.period
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.session.timeout</name>
-    <value>30000</value>
-    <description>ZooKeeper session timeout.
-      HBase passes this to the zk quorum as suggested maximum time for a
-      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
-      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
-      "The client sends a requested timeout, the server responds with the
-      timeout that it can give the client. " In milliseconds.
-    </description>
-  </property>
-  <property>
-    <name>hbase.client.keyvalue.maxsize</name>
-    <value>10485760</value>
-    <description>Specifies the combined maximum allowed size of a KeyValue
-    instance. This is to set an upper boundary for a single entry saved in a
-    storage file. Since they cannot be split it helps avoiding that a region
-    cannot be split any further because the data is too large. It seems wise
-    to set this to a fraction of the maximum region size. Setting it to zero
-    or less disables the check.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.compactionThreshold</name>
-    <value>3</value>
-    <description>
-    If more than this number of HStoreFiles in any one HStore
-    (one HStoreFile is written per flush of memstore) then a compaction
-    is run to rewrite all HStoreFiles files as one.  Larger numbers
-    put off compaction but when it runs, it takes longer to complete.
-    </description>
-  </property>
-  <property>
-    <name>hbase.hstore.flush.retries.number</name>
-    <value>120</value>
-    <description>
-    The number of times the region flush operation will be retried.
-    </description>
-  </property>
-  
-  <property>
-    <name>hbase.hstore.blockingStoreFiles</name>
-    <value>10</value>
-    <description>
-    If more than this number of StoreFiles in any one Store
-    (one StoreFile is written per flush of MemStore) then updates are
-    blocked for this HRegion until a compaction is completed, or
-    until hbase.hstore.blockingWaitTime has been exceeded.
-    </description>
-  </property>
-  <property>
-    <name>hfile.block.cache.size</name>
-    <value>0.40</value>
-    <description>
-        Percentage of maximum heap (-Xmx setting) to allocate to block cache
-        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
-        Set to 0 to disable but it's not recommended.
-    </description>
-  </property>
-
-  <!-- The following properties configure authentication information for
-       HBase processes when using Kerberos security.  There are no default
-       values, included here for documentation purposes -->
-  <property>
-    <name>hbase.master.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HMaster server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.master.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HMaster process.  The principal name should
-    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
-    portion, it will be replaced with the actual hostname of the running
-    instance.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.keytab.file</name>
-    <value></value>
-    <description>Full path to the kerberos keytab file to use for logging in
-    the configured HRegionServer server principal.
-    </description>
-  </property>
-  <property>
-    <name>hbase.regionserver.kerberos.principal</name>
-    <value></value>
-    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
-    that should be used to run the HRegionServer process.  The principal name
-    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
-    hostname portion, it will be replaced with the actual hostname of the
-    running instance.  An entry for this principal must exist in the file
-    specified in hbase.regionserver.keytab.file
-    </description>
-  </property>
-
-  <!-- Additional configuration specific to HBase security -->
-  <property>
-    <name>hbase.superuser</name>
-    <value>hbase</value>
-    <description>List of users or groups (comma-separated), who are allowed
-    full privileges, regardless of stored ACLs, across the cluster.
-    Only used when HBase security is enabled.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.security.authentication</name>
-    <value>simple</value>
-  </property>
-
-  <property>
-    <name>hbase.security.authorization</name>
-    <value>false</value>
-    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.region.classes</name>
-    <value></value>
-    <description>A comma-separated list of Coprocessors that are loaded by
-    default on all tables. For any override coprocessor method, these classes
-    will be called in order. After implementing your own Coprocessor, just put
-    it in HBase's classpath and add the fully qualified class name here.
-    A coprocessor can also be loaded on demand by setting HTableDescriptor.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.coprocessor.master.classes</name>
-    <value></value>
-    <description>A comma-separated list of
-      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
-      loaded by default on the active HMaster process. For any implemented
-      coprocessor methods, the listed classes will be called in order. After
-      implementing your own MasterObserver, just put it in HBase's classpath
-      and add the fully qualified class name here.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.zookeeper.property.clientPort</name>
-    <value>2181</value>
-    <description>Property from ZooKeeper's config zoo.cfg.
-    The port at which the clients will connect.
-    </description>
-  </property>
-
-  <!--
-  The following three properties are used together to create the list of
-  host:peer_port:leader_port quorum servers for ZooKeeper.
-  -->
-  <property>
-    <name>hbase.zookeeper.quorum</name>
-    <value>localhost</value>
-    <description>Comma separated list of servers in the ZooKeeper Quorum.
-    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
-    By default this is set to localhost for local and pseudo-distributed modes
-    of operation. For a fully-distributed setup, this should be set to a full
-    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
-    this is the list of servers which we will start/stop ZooKeeper on.
-    </description>
-  </property>
-  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
-
-  <property>
-    <name>hbase.zookeeper.useMulti</name>
-    <value>true</value>
-    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
-    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
-    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).·
-    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
-    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
-    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
-    </description>
-  </property>
-  <property>
-    <name>zookeeper.znode.parent</name>
-    <value>/hbase-unsecure</value>
-    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
-      files that are configured with a relative path will go under this node.
-      By default, all of HBase's ZooKeeper file path are configured with a
-      relative path, so they will all go under this directory unless changed.
-    </description>
-  </property>
-
-  <property>
-    <name>hbase.defaults.for.version.skip</name>
-    <value>true</value>
-    <description>Disables version verification.</description>
-  </property>
-
-  <property>
-    <name>dfs.domain.socket.path</name>
-    <value>/var/lib/hadoop-hdfs/dn_socket</value>
-    <description>Path to domain socket.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
deleted file mode 100644
index 7227b6e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HBASE/metainfo.xml
+++ /dev/null
@@ -1,93 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HBASE</name>
-      <comment>Non-relational distributed database and centralized service for configuration management &amp;
-        synchronization
-      </comment>
-      <version>0.96.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HBASE_MASTER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hbase_master.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-          <customCommands>
-            <customCommand>
-              <name>DECOMMISSION</name>
-              <commandScript>
-                <script>scripts/hbase_master.py</script>
-                <scriptType>PYTHON</scriptType>
-                <timeout>600</timeout>
-              </commandScript>
-            </customCommand>
-          </customCommands>
-        </component>
-
-        <component>
-          <name>HBASE_REGIONSERVER</name>
-          <category>SLAVE</category>
-          <commandScript>
-            <script>scripts/hbase_regionserver.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HBASE_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hbase_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hbase</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-        <config-type>hbase-policy</config-type>
-        <config-type>hbase-site</config-type>
-      </configuration-dependencies>
-
-    </service>
-  </services>
-</metainfo>


[05/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py
new file mode 100644
index 0000000..1ad1412
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/params.py
@@ -0,0 +1,79 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+oozie_user = config['configurations']['global']['oozie_user']
+smokeuser = config['configurations']['global']['smokeuser']
+conf_dir = "/etc/oozie/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+user_group = config['configurations']['global']['user_group']
+jdk_location = config['hostLevelParams']['jdk_location']
+check_db_connection_jar_name = "DBConnectionVerification.jar"
+check_db_connection_jar = format("/usr/lib/ambari-agent/{check_db_connection_jar_name}")
+hadoop_prefix = "/usr"
+oozie_tmp_dir = "/var/tmp/oozie"
+oozie_hdfs_user_dir = format("/user/{oozie_user}")
+oozie_pid_dir = status_params.oozie_pid_dir
+pid_file = status_params.pid_file
+hadoop_jar_location = "/usr/lib/hadoop/"
+# for HDP1 it's "/usr/share/HDP-oozie/ext.zip"
+ext_js_path = "/usr/share/HDP-oozie/ext-2.2.zip"
+oozie_libext_dir = "/usr/lib/oozie/libext"
+lzo_enabled = config['configurations']['global']['lzo_enabled']
+security_enabled = config['configurations']['global']['security_enabled']
+
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
+oozie_service_keytab = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.keytab.file']
+oozie_principal = config['configurations']['oozie-site']['oozie.service.HadoopAccessorService.kerberos.principal']
+smokeuser_keytab = config['configurations']['global']['smokeuser_keytab']
+oozie_keytab = config['configurations']['global']['oozie_keytab']
+
+oracle_driver_jar_name = "ojdbc6.jar"
+java_share_dir = "/usr/share/java"
+
+java_home = config['hostLevelParams']['java_home']
+oozie_metastore_user_name = config['configurations']['oozie-site']['oozie.service.JPAService.jdbc.username']
+oozie_metastore_user_passwd = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.password","")
+oozie_jdbc_connection_url = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.url", "")
+oozie_log_dir = config['configurations']['global']['oozie_log_dir']
+oozie_data_dir = config['configurations']['global']['oozie_data_dir']
+oozie_lib_dir = "/var/lib/oozie/"
+oozie_webapps_dir = "/var/lib/oozie/oozie-server/webapps/"
+
+jdbc_driver_name = default("/configurations/oozie-site/oozie.service.JPAService.jdbc.driver", "")
+
+if jdbc_driver_name == "com.mysql.jdbc.Driver":
+  jdbc_driver_jar = "/usr/share/java/mysql-connector-java.jar"
+elif jdbc_driver_name == "oracle.jdbc.driver.OracleDriver":
+  jdbc_driver_jar = "/usr/share/java/ojdbc6.jar"
+else:
+  jdbc_driver_jar = ""
+
+hostname = config["hostname"]
+falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
+has_falcon_host = len(falcon_host) > 0
+falcon_home = '/usr/lib/falcon'
+
+
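
Aside for readers of the script above: the slash-separated keys passed to default() (e.g. "/configurations/oozie-site/oozie.service.JPAService.jdbc.url") are resolved against the nested command configuration, falling back to the given value when a key is absent. A minimal standalone sketch of that lookup behaviour follows; it is plain Python over hypothetical data, not the actual resource_management implementation.

def lookup_with_default(config, path, fallback):
    # Walk the nested dict one path segment at a time and return the fallback
    # as soon as a segment is missing, which is how the script above treats
    # optional oozie-site keys such as the JDBC URL and password.
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

# Hypothetical command JSON fragment, for illustration only.
sample = {"configurations": {"oozie-site": {}}}
print(lookup_with_default(sample, "/configurations/oozie-site/oozie.service.JPAService.jdbc.url", ""))  # -> ""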

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py
new file mode 100644
index 0000000..7c1c1f2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/service_check.py
@@ -0,0 +1,67 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class OozieServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    # on HDP1 this file is different
+    smoke_test_file_name = 'oozieSmoke2.sh'
+
+    oozie_smoke_shell_file( smoke_test_file_name)
+  
+def oozie_smoke_shell_file(
+  file_name
+):
+  import params
+
+  File( format("/tmp/{file_name}"),
+    content = StaticFile(file_name),
+    mode = 0755
+  )
+  
+  if params.security_enabled:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled} {smokeuser_keytab} {kinit_path_local}")
+  else:
+    sh_cmd = format("sh /tmp/{file_name} {conf_dir} {hadoop_conf_dir} {smokeuser} {security_enabled}")
+
+  Execute( format("/tmp/{file_name}"),
+    command   = sh_cmd,
+    tries     = 3,
+    try_sleep = 5,
+    logoutput = True
+  )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Oozie/input.json'
+  basedir = '/root/workspace/Oozie/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  OozieServiceCheck().execute()
+  
+if __name__ == "__main__":
+  OozieServiceCheck().execute()
+  #main()
+  

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py
new file mode 100644
index 0000000..c44fcf4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/scripts/status_params.py
@@ -0,0 +1,26 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+oozie_pid_dir = config['configurations']['global']['oozie_pid_dir']
+pid_file = format("{oozie_pid_dir}/oozie.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2
new file mode 100644
index 0000000..270a1a8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-env.sh.j2
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+# 
+#      http://www.apache.org/licenses/LICENSE-2.0
+# 
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#Set JAVA HOME
+export JAVA_HOME={{java_home}}
+
+# Set Oozie specific environment variables here.
+
+# Settings for the Embedded Tomcat that runs Oozie
+# Java System properties for Oozie should be specified in this variable
+#
+# export CATALINA_OPTS=
+
+# Oozie configuration file to load from Oozie configuration directory
+#
+# export OOZIE_CONFIG_FILE=oozie-site.xml
+
+# Oozie logs directory
+#
+export OOZIE_LOG={{oozie_log_dir}}
+
+# Oozie pid directory
+#
+export CATALINA_PID={{pid_file}}
+
+#Location of the data for oozie
+export OOZIE_DATA={{oozie_data_dir}}
+
+# Oozie Log4J configuration file to load from Oozie configuration directory
+#
+# export OOZIE_LOG4J_FILE=oozie-log4j.properties
+
+# Reload interval of the Log4J configuration file, in seconds
+#
+# export OOZIE_LOG4J_RELOAD=10
+
+# The port Oozie server runs
+#
+# export OOZIE_HTTP_PORT=11000
+
+# The host name Oozie server runs on
+#
+# export OOZIE_HTTP_HOSTNAME=`hostname -f`
+
+# The base URL for callback URLs to Oozie
+#
+# export OOZIE_BASE_URL="http://${OOZIE_HTTP_HOSTNAME}:${OOZIE_HTTP_PORT}/oozie"
+export JAVA_LIBRARY_PATH=/usr/lib/hadoop/lib/native/Linux-amd64-64

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2
new file mode 100644
index 0000000..e4a2662
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/OOZIE/package/templates/oozie-log4j.properties.j2
@@ -0,0 +1,74 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License. See accompanying LICENSE file.
+#
+
+# If the Java System property 'oozie.log.dir' is not defined at Oozie start up time
+# XLogService sets its value to '${oozie.home}/logs'
+
+log4j.appender.oozie=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozie.DatePattern='.'yyyy-MM-dd-HH
+log4j.appender.oozie.File=${oozie.log.dir}/oozie.log
+log4j.appender.oozie.Append=true
+log4j.appender.oozie.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozie.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieops=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieops.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieops.File=${oozie.log.dir}/oozie-ops.log
+log4j.appender.oozieops.Append=true
+log4j.appender.oozieops.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieops.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieinstrumentation=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieinstrumentation.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieinstrumentation.File=${oozie.log.dir}/oozie-instrumentation.log
+log4j.appender.oozieinstrumentation.Append=true
+log4j.appender.oozieinstrumentation.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieinstrumentation.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.oozieaudit=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.oozieaudit.DatePattern='.'yyyy-MM-dd
+log4j.appender.oozieaudit.File=${oozie.log.dir}/oozie-audit.log
+log4j.appender.oozieaudit.Append=true
+log4j.appender.oozieaudit.layout=org.apache.log4j.PatternLayout
+log4j.appender.oozieaudit.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.appender.openjpa=org.apache.log4j.DailyRollingFileAppender
+log4j.appender.openjpa.DatePattern='.'yyyy-MM-dd
+log4j.appender.openjpa.File=${oozie.log.dir}/oozie-jpa.log
+log4j.appender.openjpa.Append=true
+log4j.appender.openjpa.layout=org.apache.log4j.PatternLayout
+log4j.appender.openjpa.layout.ConversionPattern=%d{ISO8601} %5p %c{1}:%L - %m%n
+
+log4j.logger.openjpa=INFO, openjpa
+log4j.logger.oozieops=INFO, oozieops
+log4j.logger.oozieinstrumentation=ALL, oozieinstrumentation
+log4j.logger.oozieaudit=ALL, oozieaudit
+log4j.logger.org.apache.oozie=INFO, oozie
+log4j.logger.org.apache.hadoop=WARN, oozie
+log4j.logger.org.mortbay=WARN, oozie
+log4j.logger.org.hsqldb=WARN, oozie
+log4j.logger.org.apache.hadoop.security.authentication.server=INFO, oozie

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties
new file mode 100644
index 0000000..01000b5
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/configuration/pig.properties
@@ -0,0 +1,52 @@
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Pig default configuration file. All values can be overwritten by pig.properties and command line arguments.
+# see bin/pig -help
+
+# brief logging (no timestamps)
+brief=false
+
+#debug level, INFO is default
+debug=INFO
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+verbose=false
+
+#exectype local|mapreduce, mapreduce is default
+exectype=mapreduce
+
+#Enable insertion of information about script into hadoop job conf 
+pig.script.info.enabled=true
+
+#Do not spill temp files smaller than this size (bytes)
+pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+pig.exec.reducers.bytes.per.reducer=1000000000
+pig.exec.reducers.max=999
+
+#Temporary location to store the intermediate data.
+pig.temp.dir=/tmp/
+
+#Threshold for merging FRJoin fragment files
+pig.files.concatenation.threshold=100
+pig.optimistic.files.concatenation=false;
+
+pig.disable.counter=false

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
new file mode 100644
index 0000000..2e7d493
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/metainfo.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>PIG</name>
+      <comment>Scripting platform for analyzing large datasets</comment>
+      <version>0.12.0.2.1.1</version>
+      <components>
+        <component>
+          <name>PIG</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/pig_client.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>pig</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh
new file mode 100644
index 0000000..a22456e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/files/pigSmoke.sh
@@ -0,0 +1,18 @@
+/*Licensed to the Apache Software Foundation (ASF) under one or more
+ contributor license agreements.  See the NOTICE file distributed with
+ this work for additional information regarding copyright ownership.
+ The ASF licenses this file to You under the Apache License, Version 2.0
+ (the "License"); you may not use this file except in compliance with
+ the License.  You may obtain a copy of the License at
+
+     http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License */
+
+A = load 'passwd' using PigStorage(':');
+B = foreach A generate \$0 as id;
+store B into 'pigsmoke.out';

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py
new file mode 100644
index 0000000..86e962c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/params.py
@@ -0,0 +1,36 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+# server configurations
+config = Script.get_config()
+
+pig_conf_dir = "/etc/pig/conf"
+hadoop_conf_dir = "/etc/hadoop/conf"
+hdfs_user = config['configurations']['global']['hdfs_user']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+# not supporting 32 bit jdk.
+java64_home = config['hostLevelParams']['java_home']
+hadoop_home = "/usr"
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py
new file mode 100644
index 0000000..c2d7b02
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig.py
@@ -0,0 +1,46 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+def pig():
+  import params
+
+  Directory( params.pig_conf_dir,
+    owner = params.hdfs_user,
+    group = params.user_group
+  )
+
+  pig_TemplateConfig( ['pig-env.sh','pig.properties','log4j.properties'])
+  
+  
+def pig_TemplateConfig(name):
+  import params
+  
+  if not isinstance(name, list):
+    name = [name]
+    
+  for x in name:
+    TemplateConfig( format("{pig_conf_dir}/{x}"),
+        owner = params.hdfs_user
+    )
+  
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py
new file mode 100644
index 0000000..acd0cb1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/pig_client.py
@@ -0,0 +1,52 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+import sys
+from resource_management import *
+from pig import pig
+
+         
+class PigClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+    
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    pig()
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+    
+#for tests
+def main():
+  command_type = 'install'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigClient().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py
new file mode 100644
index 0000000..3cca087
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/scripts/service_check.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python2.6
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+Ambari Agent
+
+"""
+
+from resource_management import *
+
+class PigServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    
+    input_file = 'passwd'
+    output_file = "pigsmoke.out"
+  
+    cleanup_cmd = format("dfs -rmr {output_file} {input_file}")
+    #cleanup comes first to handle retries; if retrying, there will be a stale file that needs cleanup; the exit code comes from the second command
+    create_file_cmd = format("{cleanup_cmd}; hadoop dfs -put /etc/passwd {input_file} ") #TODO: inconsistent that second command needs hadoop
+    test_cmd = format("fs -test -e {output_file}")
+  
+    ExecuteHadoop( create_file_cmd,
+      tries     = 3,
+      try_sleep = 5,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+  
+    File( '/tmp/pigSmoke.sh',
+      content = StaticFile("pigSmoke.sh"),
+      mode = 0755
+    )
+  
+    Execute( "pig /tmp/pigSmoke.sh",
+      tries     = 3,
+      try_sleep = 5,
+      path      = '/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+      user      = params.smokeuser,
+      logoutput = True
+    )
+  
+    ExecuteHadoop( test_cmd,
+      user      = params.smokeuser,
+      conf_dir = params.hadoop_conf_dir
+    )
+    
+def main():
+  import sys
+  command_type = 'service_check'
+  command_data_file = '/root/workspace/Pig/input.json'
+  basedir = '/root/workspace/Pig/main'
+  sys.argv = ["", command_type, command_data_file, basedir]
+  
+  PigServiceCheck().execute()
+  
+if __name__ == "__main__":
+  #main()
+  PigServiceCheck().execute()
+  
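
Aside: the three resources above amount to a staged smoke test: clean and stage /etc/passwd in HDFS, run the pigSmoke.sh script, then assert that pigsmoke.out exists. A rough manual equivalent is sketched below with plain Python subprocess calls; the hadoop/pig command lines are standard, but running them assumes a shell user with HDFS access (Ambari uses the smoke user), and the file names simply mirror the ones hard-coded above.

import subprocess

def run(cmd):
    # Print and run a shell command, raising on failure (the Execute/ExecuteHadoop
    # resources above add retries on top of essentially the same calls).
    print("+ " + cmd)
    subprocess.check_call(cmd, shell=True)

run("hadoop fs -rm -r -f pigsmoke.out passwd")   # tolerate leftovers from earlier retries
run("hadoop fs -put /etc/passwd passwd")         # stage the input file
run("pig /tmp/pigSmoke.sh")                      # run the smoke script staged by the service check
run("hadoop fs -test -e pigsmoke.out")           # non-zero exit if the output is missing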

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/log4j.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/log4j.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/log4j.properties.j2
new file mode 100644
index 0000000..9ef6e2c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/log4j.properties.j2
@@ -0,0 +1,30 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# ***** Set the org.apache.pig logger level to INFO and its only appender to A.
+log4j.logger.org.apache.pig=info, A
+
+# ***** A is set to be a ConsoleAppender.
+log4j.appender.A=org.apache.log4j.ConsoleAppender
+# ***** A uses PatternLayout.
+log4j.appender.A.layout=org.apache.log4j.PatternLayout
+log4j.appender.A.layout.ConversionPattern=%-4r [%t] %-5p %c %x - %m%n

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2
new file mode 100644
index 0000000..b0e17d4
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig-env.sh.j2
@@ -0,0 +1,17 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+JAVA_HOME={{java64_home}}
+HADOOP_HOME=${HADOOP_HOME:-{{hadoop_home}}}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2
new file mode 100644
index 0000000..6fcb233
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/PIG/package/templates/pig.properties.j2
@@ -0,0 +1,55 @@
+#
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing,
+# software distributed under the License is distributed on an
+# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+# KIND, either express or implied.  See the License for the
+# specific language governing permissions and limitations
+# under the License.
+#
+#
+#
+
+# Pig configuration file. All values can be overwritten by command line arguments.
+
+# log4jconf log4j configuration file
+# log4jconf=./conf/log4j.properties
+
+# a file that contains pig script
+#file=
+
+# load jarfile, colon separated
+#jar=
+
+#verbose print all log messages to screen (default to print only INFO and above to screen)
+#verbose=true
+
+#exectype local|mapreduce, mapreduce is default
+#exectype=local
+
+#pig.logfile=
+
+#Do not spill temp files smaller than this size (bytes)
+#pig.spill.size.threshold=5000000
+#EXPERIMENT: Activate garbage collection when spilling a file bigger than this size (bytes)
+#This should help reduce the number of files being spilled.
+#pig.spill.gc.activation.size=40000000
+
+#the following two parameters are to help estimate the reducer number
+#pig.exec.reducers.bytes.per.reducer=1000000000
+#pig.exec.reducers.max=999
+
+#Use this option only when your Pig job will otherwise die because of
+#using more counters than the hadoop-configured limit
+#pig.disable.counter=true
+hcat.bin=/usr/bin/hcat

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml
new file mode 100644
index 0000000..ea115c7
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/metainfo.xml
@@ -0,0 +1,60 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>SQOOP</name>
+      <comment>Tool for transferring bulk data between Apache Hadoop and
+        structured data stores such as relational databases
+      </comment>
+      <version>1.4.4.2.1.1</version>
+
+      <components>
+        <component>
+          <name>SQOOP</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/sqoop_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>sqoop</name>
+            </package>
+            <package>
+              <type>rpm</type>
+              <name>mysql-connector-java</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+    </service>
+  </services>
+</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py
new file mode 100644
index 0000000..5561e10
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py
new file mode 100644
index 0000000..5655131
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/params.py
@@ -0,0 +1,37 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+security_enabled = config['configurations']['global']['security_enabled']
+smokeuser = config['configurations']['global']['smokeuser']
+user_group = config['configurations']['global']['user_group']
+
+sqoop_conf_dir = "/usr/lib/sqoop/conf"
+hbase_home = "/usr"
+hive_home = "/usr"
+zoo_conf_dir = "/etc/zookeeper"
+sqoop_lib = "/usr/lib/sqoop/lib"
+sqoop_user = "sqoop"
+
+keytab_path = config['configurations']['global']['keytab_path']
+smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
+kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py
new file mode 100644
index 0000000..c42501a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/service_check.py
@@ -0,0 +1,37 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management import *
+
+
+class SqoopServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+    if params.security_enabled:
+        Execute(format("{kinit_path_local}  -kt {smoke_user_keytab} {smokeuser}"))
+    Execute("sqoop version",
+            user = params.smokeuser,
+            logoutput = True
+    )
+
+if __name__ == "__main__":
+  SqoopServiceCheck().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py
new file mode 100644
index 0000000..148a833
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop.py
@@ -0,0 +1,52 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import sys
+
+def sqoop(type=None):
+  import params
+  Link(params.sqoop_lib + "/mysql-connector-java.jar",
+       to = '/usr/share/java/mysql-connector-java.jar'
+  )
+  Directory(params.sqoop_conf_dir,
+            owner = params.sqoop_user,
+            group = params.user_group
+  )
+  sqoop_TemplateConfig("sqoop-env.sh")
+  File (params.sqoop_conf_dir + "/sqoop-env-template.sh",
+          owner = params.sqoop_user,
+          group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site-template.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+  File (params.sqoop_conf_dir + "/sqoop-site.xml",
+         owner = params.sqoop_user,
+         group = params.user_group
+  )
+  pass
+
+def sqoop_TemplateConfig(name, tag=None):
+  import params
+  TemplateConfig( format("{sqoop_conf_dir}/{name}"),
+                  owner = params.sqoop_user,
+                  template_tag = tag
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py
new file mode 100644
index 0000000..6829557
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/scripts/sqoop_client.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+
+from sqoop import sqoop
+
+
+class SqoopClient(Script):
+  def install(self, env):
+    self.install_packages(env)
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+    sqoop(type='client')
+
+  def status(self, env):
+    raise ClientComponentHasNoStatus()
+
+if __name__ == "__main__":
+  SqoopClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2
new file mode 100644
index 0000000..90cbc75
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/SQOOP/package/templates/sqoop-env.sh.j2
@@ -0,0 +1,36 @@
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# included in all the hadoop scripts with source command
+# should not be executable directly
+# also should not be passed any arguments, since we need original $*
+
+# Set Hadoop-specific environment variables here.
+
+#Set path to where bin/hadoop is available
+export HADOOP_HOME=${HADOOP_HOME:-/usr/lib/hadoop}
+
+#set the path to where bin/hbase is available
+export HBASE_HOME=${HBASE_HOME:-/usr/lib/hbase}
+
+#Set the path to where bin/hive is available
+export HIVE_HOME=${HIVE_HOME:-/usr/lib/hive}
+
+#Set the path for where zookeper config dir is
+export ZOOCFGDIR=${ZOOCFGDIR:-/etc/zookeeper/conf}
+
+# add libthrift in hive to sqoop class path first so hive imports work
+export SQOOP_USER_CLASSPATH="`ls ${HIVE_HOME}/lib/libthrift-*.jar 2> /dev/null`:${SQOOP_USER_CLASSPATH}"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/global.xml
new file mode 100644
index 0000000..5cc9170
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/global.xml
@@ -0,0 +1,39 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>storm_user</name>
+    <value>storm</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm_log_dir</name>
+    <value>/var/log/storm</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm_pid_dir</name>
+    <value>/var/run/storm</value>
+    <description></description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/storm-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/storm-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/storm-site.xml
new file mode 100644
index 0000000..f81b3c3
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/configuration/storm-site.xml
@@ -0,0 +1,474 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>java.library.path</name>
+    <value>/usr/local/lib:/opt/local/lib:/usr/lib</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.local.dir</name>
+    <value>/hadoop/storm</value>
+    <description>The place where jars are kept</description>
+  </property>
+  <property>
+    <name>storm.zookeeper.servers</name>
+    <value>['localhost']</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.zookeeper.port</name>
+    <value>2181</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.zookeeper.root</name>
+    <value>/storm</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.zookeeper.session.timeout</name>
+    <value>20000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.zookeeper.connection.timeout</name>
+    <value>15000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.zookeeper.retry.times</name>
+    <value>5</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.zookeeper.retry.interval</name>
+    <value>1000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.zookeeper.retry.intervalceiling.millis</name>
+    <value>30000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.cluster.mode</name>
+    <value>distributed</value>
+    <description>Can be distributed or local</description>
+  </property>
+  <property>
+    <name>storm.local.mode.zmq</name>
+    <value>false</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.thrift.transport</name>
+    <value>backtype.storm.security.auth.SimpleTransportPlugin</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.messaging.transport</name>
+    <value>backtype.storm.messaging.netty.Context</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.host</name>
+    <value>localhost</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.thrift.port</name>
+    <value>6627</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.thrift.max_buffer_size</name>
+    <value>1048576</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.childopts</name>
+    <value>-Xmx1024m</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.task.timeout.secs</name>
+    <value>30</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.supervisor.timeout.secs</name>
+    <value>60</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.monitor.freq.secs</name>
+    <value>10</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.cleanup.inbox.freq.secs</name>
+    <value>600</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.inbox.jar.expiration.secs</name>
+    <value>3600</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.task.launch.secs</name>
+    <value>120</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.reassign</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.file.copy.expiration.secs</name>
+    <value>600</value>
+    <description></description>
+  </property>
+  <property>
+    <name>nimbus.topology.validator</name>
+    <value>backtype.storm.nimbus.DefaultTopologyValidator</value>
+    <description></description>
+  </property>
+  <property>
+    <name>ui.port</name>
+    <value>8744</value>
+    <description></description>
+  </property>
+  <property>
+    <name>ui.childopts</name>
+    <value>-Xmx768m</value>
+    <description></description>
+  </property>
+  <property>
+    <name>logviewer.port</name>
+    <value>8000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>logviewer.childopts</name>
+    <value>-Xmx128m</value>
+    <description></description>
+  </property>
+  <property>
+    <name>logviewer.appender.name</name>
+    <value>A1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>drpc.port</name>
+    <value>3772</value>
+    <description></description>
+  </property>
+  <property>
+    <name>drpc.worker.threads</name>
+    <value>64</value>
+    <description></description>
+  </property>
+  <property>
+    <name>drpc.queue.size</name>
+    <value>128</value>
+    <description></description>
+  </property>
+  <property>
+    <name>drpc.invocations.port</name>
+    <value>3773</value>
+    <description></description>
+  </property>
+  <property>
+    <name>drpc.request.timeout.secs</name>
+    <value>600</value>
+    <description></description>
+  </property>
+  <property>
+    <name>drpc.childopts</name>
+    <value>-Xmx768m</value>
+    <description></description>
+  </property>
+  <property>
+    <name>transactional.zookeeper.root</name>
+    <value>/transactional</value>
+    <description></description>
+  </property>
+  <property>
+    <name>transactional.zookeeper.servers</name>
+    <value>null</value>
+    <description></description>
+  </property>
+  <property>
+    <name>transactional.zookeeper.port</name>
+    <value>null</value>
+    <description></description>
+  </property>
+  <property>
+    <name>supervisor.slots.ports</name>
+    <value>[6700, 6701]</value>
+    <description></description>
+  </property>
+  <property>
+    <name>supervisor.childopts</name>
+    <value>-Xmx256m</value>
+    <description></description>
+  </property>
+  <property>
+    <name>supervisor.worker.start.timeout.secs</name>
+    <value>120</value>
+    <description>How long supervisor will wait to ensure that a worker process is started</description>
+  </property>
+  <property>
+    <name>supervisor.worker.timeout.secs</name>
+    <value>30</value>
+    <description>How long between heartbeats until supervisor considers that worker dead and tries to restart it</description>
+  </property>
+  <property>
+    <name>supervisor.monitor.frequency.secs</name>
+    <value>3</value>
+    <description>How frequently the supervisor checks on the status of the processes it's monitoring and restarts if necessary</description>
+  </property>
+  <property>
+    <name>supervisor.heartbeat.frequency.secs</name>
+    <value>5</value>
+    <description>How frequently the supervisor heartbeats to the cluster state (for nimbus)</description>
+  </property>
+  <property>
+    <name>supervisor.enable</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>worker.childopts</name>
+    <value>-Xmx768m</value>
+    <description></description>
+  </property>
+  <property>
+    <name>worker.heartbeat.frequency.secs</name>
+    <value>1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>task.heartbeat.frequency.secs</name>
+    <value>3</value>
+    <description></description>
+  </property>
+  <property>
+    <name>task.refresh.poll.secs</name>
+    <value>10</value>
+    <description></description>
+  </property>
+  <property>
+    <name>zmq.threads</name>
+    <value>1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>zmq.linger.millis</name>
+    <value>5000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>zmq.hwm</name>
+    <value>0</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.server_worker_threads</name>
+    <value>1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.client_worker_threads</name>
+    <value>1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.buffer_size</name>
+    <value>5242880</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.max_retries</name>
+    <value>30</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.max_wait_ms</name>
+    <value>1000</value>
+    <description></description>
+  </property>
+  <property>
+    <name>storm.messaging.netty.min_wait_ms</name>
+    <value>100</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.enable.message.timeouts</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.debug</name>
+    <value>false</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.optimize</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.workers</name>
+    <value>1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.acker.executors</name>
+    <value>null</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.message.timeout.secs</name>
+    <value>30</value>
+    <description>Maximum amount of time a message has to complete before it's considered failed</description>
+  </property>
+  <property>
+    <name>topology.skip.missing.kryo.registrations</name>
+    <value>false</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.max.task.parallelism</name>
+    <value>null</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.max.spout.pending</name>
+    <value>null</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.state.synchronization.timeout.secs</name>
+    <value>60</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.stats.sample.rate</name>
+    <value>0.05</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.builtin.metrics.bucket.size.secs</name>
+    <value>60</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.fall.back.on.java.serialization</name>
+    <value>true</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.worker.childopts</name>
+    <value>null</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.executor.receive.buffer.size</name>
+    <value>1024</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.executor.send.buffer.size</name>
+    <value>1024</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.receiver.buffer.size</name>
+    <value>8</value>
+    <description>Setting it too high causes a lot of problems (heartbeat thread gets starved, throughput plummets)</description>
+  </property>
+  <property>
+    <name>topology.transfer.buffer.size</name>
+    <value>1024</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.tick.tuple.freq.secs</name>
+    <value>null</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.worker.shared.thread.pool.size</name>
+    <value>4</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.disruptor.wait.strategy</name>
+    <value>com.lmax.disruptor.BlockingWaitStrategy</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.spout.wait.strategy</name>
+    <value>backtype.storm.spout.SleepSpoutWaitStrategy</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.sleep.spout.wait.strategy.time.ms</name>
+    <value>1</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.error.throttle.interval.secs</name>
+    <value>10</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.max.error.report.per.interval</name>
+    <value>5</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.kryo.factory</name>
+    <value>backtype.storm.serialization.DefaultKryoFactory</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.tuple.serializer</name>
+    <value>backtype.storm.serialization.types.ListDelegateSerializer</value>
+    <description></description>
+  </property>
+  <property>
+    <name>topology.trident.batch.emit.interval.millis</name>
+    <value>500</value>
+    <description></description>
+  </property>
+  <property>
+    <name>dev.zookeeper.path</name>
+    <value>/tmp/dev-storm-zookeeper</value>
+    <description></description>
+  </property>
+</configuration>
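The storm-site values above are plain strings; list-valued settings such as storm.zookeeper.servers and supervisor.slots.ports are already written in YAML list syntax, presumably so the package's template can copy them into storm.yaml verbatim. A minimal sketch of that name/value-to-YAML step (illustration only, not the actual Jinja2 template used by the package scripts):

    def to_storm_yaml(props):
        """Render storm-site name/value pairs as storm.yaml lines."""
        return "\n".join("%s: %s" % (name, value)
                         for name, value in sorted(props.items()))

    sample = {
        "storm.zookeeper.servers": "['localhost']",
        "storm.zookeeper.port": "2181",
        "supervisor.slots.ports": "[6700, 6701]",
        "nimbus.host": "localhost",
    }
    print(to_storm_yaml(sample))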

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml
new file mode 100644
index 0000000..95dd954
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/metainfo.xml
@@ -0,0 +1,105 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>STORM</name>
+      <comment>Apache Storm stream processing framework</comment>
+      <version>0.9.0.1</version>
+      <components>
+
+        <component>
+          <name>NIMBUS</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/nimbus.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>SUPERVISOR</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/supervisor.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>STORM_UI_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/ui_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>DRPC_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/drpc_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>LOGVIEWER_SERVER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/logviewer_server.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+        </component>
+      </components>
+
+
+      <!--
+      <osSpecifics>
+        <osSpecific>
+          <osType>any</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>storm</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics> -->
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>storm-site</config-type>
+        <config-type>global</config-type>
+      </configuration-dependencies>
+    </service>
+  </services>
+</metainfo>
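The metainfo above maps each STORM component to a single Python command script under package/scripts. A hedged, standard-library sketch of reading that mapping out of a metainfo.xml string (Ambari has its own metainfo parser; this only shows what the structure encodes):

    import xml.etree.ElementTree as ET

    def list_components(metainfo_xml):
        """Return (component name, command script) pairs from a metainfo.xml string."""
        root = ET.fromstring(metainfo_xml)
        return [(c.findtext("name"), c.findtext("commandScript/script"))
                for c in root.findall("./services/service/components/component")]

    # e.g. [('NIMBUS', 'scripts/nimbus.py'), ('SUPERVISOR', 'scripts/supervisor.py'), ...]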

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/files/wordCount.jar
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/files/wordCount.jar b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/files/wordCount.jar
new file mode 100644
index 0000000..aed64be
Binary files /dev/null and b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/files/wordCount.jar differ

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/drpc_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/drpc_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/drpc_server.py
new file mode 100644
index 0000000..325f86a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/drpc_server.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+
+
+class DrpcServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    # TODO remove
+    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
+            ignore_failures = True)
+
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("drpc", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("drpc", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_drpc)
+
+if __name__ == "__main__":
+  DrpcServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/logviewer_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/logviewer_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/logviewer_server.py
new file mode 100644
index 0000000..c209036
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/logviewer_server.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+
+
+class LogviewerServer(Script):
+  def install(self, env):
+    self.install_packages(env)
+    # TODO remove
+    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
+            ignore_failures = True)
+
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("logviewer", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("logviewer", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_logviewer)
+
+if __name__ == "__main__":
+  LogviewerServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/nimbus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/nimbus.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/nimbus.py
new file mode 100644
index 0000000..7210314
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/nimbus.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+import sys
+from resource_management import *
+from storm import storm
+from service import service
+from service_check import ServiceCheck
+
+
+class Nimbus(Script):
+  def install(self, env):
+    self.install_packages(env)
+    # TODO remove
+    Execute("yum install http://s3.amazonaws.com/dev.hortonworks.com/storm/storm-0.9.1.2.0.6.1-1.el6.noarch.rpm -y",
+            ignore_failures = True)
+
+    self.configure(env)
+
+  def configure(self, env):
+    import params
+    env.set_params(params)
+
+    storm()
+
+  def start(self, env):
+    import params
+    env.set_params(params)
+    self.configure(env)
+
+    service("nimbus", action="start")
+
+  def stop(self, env):
+    import params
+    env.set_params(params)
+
+    service("nimbus", action="stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    check_process_status(status_params.pid_nimbus)
+
+if __name__ == "__main__":
+  Nimbus().execute()
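nimbus.py, drpc_server.py and logviewer_server.py above (and the supervisor/UI scripts alongside them) are identical except for the daemon name and pid file. A hypothetical refactor sketch of that shared lifecycle, written with plain-Python stand-ins so it runs on its own; in the real scripts these hooks are the resource_management Script methods and the storm()/service() helpers, and this sketch is not part of the patch:

    class StormDaemonScript(object):
        daemon = None                      # e.g. "nimbus", "drpc", "logviewer"

        def install(self, env):
            self.configure(env)

        def configure(self, env):
            print("writing storm configs for %s" % self.daemon)

        def start(self, env):
            self.configure(env)
            print("starting storm %s" % self.daemon)

        def stop(self, env):
            print("stopping storm %s" % self.daemon)

    class Nimbus(StormDaemonScript):
        daemon = "nimbus"

    Nimbus().start(env={})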

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/params.py
new file mode 100644
index 0000000..57ff774
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/params.py
@@ -0,0 +1,34 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+import status_params
+
+# server configurations
+config = Script.get_config()
+
+storm_user = config['configurations']['global']['storm_user']
+log_dir = config['configurations']['global']['storm_log_dir']
+pid_dir = status_params.pid_dir
+conf_dir = "/etc/storm/conf"
+local_dir = config['configurations']['storm-site']['storm.local.dir']
+user_group = config['configurations']['global']['user_group']
+java64_home = config['hostLevelParams']['java_home']
+nimbus_host = config['configurations']['storm-site']['nimbus.host']
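params.py resolves everything from the command JSON returned by Script.get_config(). A rough illustration of the dictionary shape those lookups assume; only the keys referenced above are shown, and the concrete values are invented for the example:

    config = {
        "configurations": {
            "global": {
                "storm_user": "storm",
                "storm_log_dir": "/var/log/storm",
                "user_group": "hadoop",
            },
            "storm-site": {
                "storm.local.dir": "/hadoop/storm",
                "nimbus.host": "c6401.ambari.apache.org",
            },
        },
        "hostLevelParams": {"java_home": "/usr/jdk64/jdk1.7.0_45"},
    }

    storm_user = config['configurations']['global']['storm_user']
    nimbus_host = config['configurations']['storm-site']['nimbus.host']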

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service.py
new file mode 100644
index 0000000..721acf1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service.py
@@ -0,0 +1,62 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+
+from resource_management import *
+from resource_management.core.shell import call
+import subprocess
+
+
+def service(
+    name,
+    action='start'):
+  import params
+  import status_params
+
+  pid_file = status_params.pid_files[name]
+
+  if action == "start":
+    cmd = ["/usr/bin/storm", name]
+    if name == "ui":
+      crt_pid_cmd = format("pgrep -f \"^java.+backtype.storm.ui.core$\" > {pid_file}")
+    else:
+      crt_pid_cmd = format("pgrep -f \"^java.+backtype.storm.daemon.{name}$\" > {pid_file}")
+
+    no_op_test = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
+
+    #Execute(cmd,
+    #        not_if=no_op_test,
+    #        user=params.storm_user
+    #)
+
+    #TODO run from storm user
+
+    if call(no_op_test)[0]:
+      subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env={"PATH":format("{java64_home}/bin:/bin")})
+
+    Execute(crt_pid_cmd,
+            logoutput=True,
+            tries=6,
+            try_sleep=10
+    )
+
+  elif action == "stop":
+    cmd = format("kill `cat {pid_file}` >/dev/null 2>&1 && rm -f {pid_file}")
+    Execute(cmd)
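service() above only launches the daemon when the pid-file test fails, then records the new pid by grepping the process table for the daemon's java main class (backtype.storm.daemon.<name>, or backtype.storm.ui.core for the UI). A standalone sketch of those two pieces, assuming a Linux host with ps/pgrep on the PATH; the real scripts drive the same commands through the resource_management Execute resource rather than subprocess:

    import os
    import subprocess

    def is_running(pid_file):
        """Rough mirror of no_op_test: the pid file exists and its pid is alive."""
        try:
            with open(pid_file) as f:
                pid = f.read().strip()
            with open(os.devnull, "wb") as devnull:
                return subprocess.call(["ps", "-p", pid],
                                       stdout=devnull, stderr=devnull) == 0
        except (IOError, OSError):
            return False

    def record_pid(daemon, pid_file):
        """Rough mirror of crt_pid_cmd: find the daemon's java process, store its pid."""
        pattern = ("^java.+backtype.storm.ui.core$" if daemon == "ui"
                   else "^java.+backtype.storm.daemon.%s$" % daemon)
        pid = subprocess.check_output(["pgrep", "-f", pattern]).strip()
        with open(pid_file, "wb") as f:
            f.write(pid)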

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service_check.py
new file mode 100644
index 0000000..1cfbc48
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/service_check.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+class ServiceCheck(Script):
+  def service_check(self, env):
+    import params
+    env.set_params(params)
+
+    unique = get_unique_id_and_date()
+
+    File("/tmp/wordCount.jar",
+         content=StaticFile("wordCount.jar")
+    )
+
+    cmd = format("env PATH=$PATH:{java64_home}/bin storm jar /tmp/wordCount.jar storm.starter.WordCountTopology WordCount{unique} -c nimbus.host={nimbus_host}")
+
+    Execute(cmd,
+            logoutput=True
+    )
+
+    Execute(format("env PATH=$PATH:{java64_home}/bin storm kill WordCount{unique}"))
+
+if __name__ == "__main__":
+  ServiceCheck().execute()
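The service check just submits the bundled WordCount topology against the configured Nimbus host and then kills it. A plain-Python rendering of the two commands it ends up running; java64_home and nimbus_host are placeholders here, and str.format stands in for the resource_management format() helper used above:

    java64_home = "/usr/jdk64/jdk1.7.0_45"
    nimbus_host = "c6401.ambari.apache.org"
    unique = "id_20140118_000000"

    submit_cmd = ("env PATH=$PATH:{java}/bin storm jar /tmp/wordCount.jar "
                  "storm.starter.WordCountTopology WordCount{uid} "
                  "-c nimbus.host={host}").format(java=java64_home,
                                                  uid=unique, host=nimbus_host)
    kill_cmd = ("env PATH=$PATH:{java}/bin "
                "storm kill WordCount{uid}").format(java=java64_home, uid=unique)

    print(submit_cmd)
    print(kill_cmd)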

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/status_params.py
new file mode 100644
index 0000000..70b034a
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/STORM/package/scripts/status_params.py
@@ -0,0 +1,35 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['storm_pid_dir']
+pid_nimbus = format("{pid_dir}/nimbus.pid")
+pid_supervisor = format("{pid_dir}/supervisor.pid")
+pid_drpc = format("{pid_dir}/drpc.pid")
+pid_ui = format("{pid_dir}/ui.pid")
+pid_logviewer = format("{pid_dir}/logviewer.pid")
+
+pid_files = {"logviewer":pid_logviewer,
+             "ui": pid_ui,
+             "nimbus": pid_nimbus,
+             "supervisor": pid_supervisor,
+             "drpc": pid_drpc}
\ No newline at end of file
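Each daemon script resolves its pid file through this pid_files map and passes it to check_process_status. The format() calls above are the resource_management helper, which fills placeholders from the surrounding scope, so format("{pid_dir}/nimbus.pid") picks up the pid_dir defined just before it. A hedged stand-in showing only that lookup behaviour (the real helper also consults globals and the active configuration context):

    import inspect

    def fmt(template):
        """Fill {name} placeholders from the caller's local variables."""
        caller = inspect.currentframe().f_back
        return template.format(**caller.f_locals)

    def demo():
        pid_dir = "/var/run/storm"
        return fmt("{pid_dir}/nimbus.pid")

    print(demo())   # -> /var/run/storm/nimbus.pid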


[16/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/files/validateYarnComponentStatus.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/files/validateYarnComponentStatus.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/files/validateYarnComponentStatus.py
deleted file mode 100644
index dac198a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/files/validateYarnComponentStatus.py
+++ /dev/null
@@ -1,165 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import subprocess
-import json
-
-RESOURCEMANAGER = 'rm'
-NODEMANAGER = 'nm'
-HISTORYSERVER = 'hs'
-
-STARTED_STATE = 'STARTED'
-RUNNING_STATE = 'RUNNING'
-
-# Return the response for the given path and address
-def getResponse(path, address, ssl_enabled):
-
-  command = "curl"
-  httpGssnegotiate = "--negotiate"
-  userpswd = "-u:"
-  insecure = "-k"# This is smoke test, no need to check CA of server
-  if ssl_enabled:
-    url = 'https://' + address + path
-  else:
-    url = 'http://' + address + path
-      
-  command_with_flags = [command,httpGssnegotiate,userpswd,insecure,url]
-  try:
-    proc = subprocess.Popen(command_with_flags, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-    (stdout, stderr) = proc.communicate()
-    response = json.loads(stdout)
-    if response == None:
-      print 'There is no response for url: ' + str(url)
-      exit(1)
-    return response
-  except Exception as e:
-    print 'Error getting response for url:' + str(url), e
-    exit(1)
-
-#Verify that REST api is available for given component
-def validateAvailability(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAvailabilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking availability status of component', e
-    exit(1)
-
-#Validate component-specific response
-def validateAvailabilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      rm_state = response['clusterInfo']['state']
-      if rm_state == STARTED_STATE:
-        return True
-      else:
-        print 'Resourcemanager is not started'
-        return False
-
-    elif component == NODEMANAGER:
-      node_healthy = bool(response['nodeInfo']['nodeHealthy'])
-      if node_healthy:
-        return True
-      else:
-        return False
-    elif component == HISTORYSERVER:
-      hs_start_time = response['historyInfo']['startedOn']
-      if hs_start_time > 0:
-        return True
-      else:
-        return False
-    else:
-      return False
-  except Exception as e:
-    print 'Error validation of availability response for ' + str(component), e
-    return False
-
-#Verify that component has required resources to work
-def validateAbility(component, path, address, ssl_enabled):
-
-  try:
-    response = getResponse(path, address, ssl_enabled)
-    is_valid = validateAbilityResponse(component, response)
-    if not is_valid:
-      exit(1)
-  except Exception as e:
-    print 'Error checking ability of component', e
-    exit(1)
-
-#Validate component-specific response that it has required resources to work
-def validateAbilityResponse(component, response):
-  try:
-    if component == RESOURCEMANAGER:
-      nodes = []
-      if response.has_key('nodes') and not response['nodes'] == None and response['nodes'].has_key('node'):
-        nodes = response['nodes']['node']
-      connected_nodes_count = len(nodes)
-      if connected_nodes_count == 0:
-        print 'There is no connected nodemanagers to resourcemanager'
-        return False
-      active_nodes = filter(lambda x: x['state'] == RUNNING_STATE, nodes)
-      active_nodes_count = len(active_nodes)
-
-      if connected_nodes_count == 0:
-        print 'There is no connected active nodemanagers to resourcemanager'
-        return False
-      else:
-        return True
-    else:
-      return False
-  except Exception as e:
-    print 'Error validation of ability response', e
-    return False
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-p", "--port", dest="address", help="Host:Port for REST API of a desired component")
-  parser.add_option("-s", "--ssl", dest="ssl_enabled", help="Is SSL enabled for UI of component")
-
-  (options, args) = parser.parse_args()
-
-  component = args[0]
-  
-  address = options.address
-  ssl_enabled = (options.ssl_enabled) in 'true'
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/info'
-  elif component == NODEMANAGER:
-    path = '/ws/v1/node/info'
-  elif component == HISTORYSERVER:
-    path = '/ws/v1/history/info'
-  else:
-    parser.error("Invalid component")
-
-  validateAvailability(component, path, address, ssl_enabled)
-
-  if component == RESOURCEMANAGER:
-    path = '/ws/v1/cluster/nodes'
-    validateAbility(component, path, address, ssl_enabled)
-
-if __name__ == "__main__":
-  main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/historyserver.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/historyserver.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/historyserver.py
deleted file mode 100644
index 9b6003c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/historyserver.py
+++ /dev/null
@@ -1,60 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-class Historyserver(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('historyserver',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('historyserver',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.histroyserver_pid_file)
-
-if __name__ == "__main__":
-  Historyserver().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapred_service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapred_service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapred_service_check.py
deleted file mode 100644
index 3b789f8..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapred_service_check.py
+++ /dev/null
@@ -1,74 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class MapReduce2ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    jar_path = format("{hadoop_mapred2_jar_location}/{hadoopMapredExamplesJarName}")
-    input_file = format("/user/{smokeuser}/mapredsmokeinput")
-    output_file = format("/user/{smokeuser}/mapredsmokeoutput")
-
-    cleanup_cmd = format("fs -rm -r -f {output_file} {input_file}")
-    create_file_cmd = format("fs -put /etc/passwd {input_file}")
-    test_cmd = format("fs -test -e {output_file}")
-    run_wordcount_job = format("jar {jar_path} wordcount {input_file} {output_file}")
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-
-      Execute(kinit_cmd,
-              user=params.smokeuser
-      )
-
-    ExecuteHadoop(cleanup_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-    ExecuteHadoop(create_file_cmd,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-    ExecuteHadoop(run_wordcount_job,
-                  tries=1,
-                  try_sleep=5,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir,
-                  logoutput=True
-    )
-
-    ExecuteHadoop(test_cmd,
-                  user=params.smokeuser,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-if __name__ == "__main__":
-  MapReduce2ServiceCheck().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapreduce2_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapreduce2_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapreduce2_client.py
deleted file mode 100644
index 54119a7..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/mapreduce2_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-
-class MapReduce2Client(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  MapReduce2Client().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/nodemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/nodemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/nodemanager.py
deleted file mode 100644
index dbeaca0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/nodemanager.py
+++ /dev/null
@@ -1,61 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-class Nodemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('nodemanager',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-
-    service('nodemanager',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.nodemanager_pid_file)
-
-if __name__ == "__main__":
-  Nodemanager().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/params.py
deleted file mode 100644
index f1b22bc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/params.py
+++ /dev/null
@@ -1,89 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-config_dir = "/etc/hadoop/conf"
-
-mapred_user = status_params.mapred_user
-yarn_user = status_params.yarn_user
-hdfs_user = config['configurations']['global']['hdfs_user']
-
-smokeuser = config['configurations']['global']['smokeuser']
-security_enabled = config['configurations']['global']['security_enabled']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-yarn_executor_container_group = config['configurations']['yarn-site']['yarn.nodemanager.linux-container-executor.group']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-rm_host = config['clusterHostInfo']['rm_host'][0]
-rm_port = config['configurations']['yarn-site']['yarn.resourcemanager.webapp.address'].split(':')[-1]
-rm_https_port = "8090"
-
-java64_home = config['hostLevelParams']['java_home']
-hadoop_ssl_enabled = default("/configurations/core-site/hadoop.ssl.enabled", False)
-
-hadoop_libexec_dir = '/usr/lib/hadoop/libexec'
-hadoop_yarn_home = '/usr/lib/hadoop-yarn'
-yarn_heapsize = config['configurations']['global']['yarn_heapsize']
-resourcemanager_heapsize = config['configurations']['global']['resourcemanager_heapsize']
-nodemanager_heapsize = config['configurations']['global']['nodemanager_heapsize']
-
-yarn_log_dir_prefix = config['configurations']['global']['yarn_log_dir_prefix']
-yarn_pid_dir_prefix = status_params.yarn_pid_dir_prefix
-mapred_pid_dir_prefix = status_params.mapred_pid_dir_prefix
-mapred_log_dir_prefix = config['configurations']['global']['mapred_log_dir_prefix']
-
-rm_webui_address = format("{rm_host}:{rm_port}")
-rm_webui_https_address = format("{rm_host}:{rm_https_port}")
-nm_webui_address = config['configurations']['yarn-site']['yarn.nodemanager.webapp.address']
-hs_webui_address = config['configurations']['mapred-site']['mapreduce.jobhistory.webapp.address']
-
-nm_local_dirs = config['configurations']['yarn-site']['yarn.nodemanager.local-dirs']
-nm_log_dirs = config['configurations']['yarn-site']['yarn.nodemanager.log-dirs']
-
-
-hadoop_mapred2_jar_location = "/usr/lib/hadoop-mapreduce"
-distrAppJarName = "hadoop-yarn-applications-distributedshell-2.*.jar"
-hadoopMapredExamplesJarName = "hadoop-mapreduce-examples-2.*.jar"
-
-yarn_pid_dir = status_params.yarn_pid_dir
-mapred_pid_dir = status_params.mapred_pid_dir
-
-mapred_log_dir = format("{mapred_log_dir_prefix}/{mapred_user}")
-yarn_log_dir = format("{yarn_log_dir_prefix}/{yarn_user}")
-mapred_job_summary_log = format("{mapred_log_dir_prefix}/{mapred_user}/hadoop-mapreduce.jobsummary.log")
-yarn_job_summary_log = format("{yarn_log_dir_prefix}/{yarn_user}/hadoop-mapreduce.jobsummary.log")
-
-mapred_bin = "/usr/lib/hadoop-mapreduce/sbin"
-yarn_bin = "/usr/lib/hadoop-yarn/sbin"
-
-user_group = config['configurations']['global']['user_group']
-limits_conf_dir = "/etc/security/limits.d"
-hadoop_conf_dir = "/etc/hadoop/conf"
-yarn_container_bin = "/usr/lib/hadoop-yarn/bin"
-
-#exclude file
-exclude_hosts = default("/clusterHostInfo/decom_nm_hosts", [])
-exclude_file_path = config['configurations']['yarn-site']['yarn.resourcemanager.nodes.exclude-path']
\ No newline at end of file
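
For reference, a minimal standalone sketch (plain Python; the host and port
values are placeholders, not real cluster data) of how the parameter
definitions removed above turn the yarn-site webapp address and the cluster
host list into the ResourceManager web UI addresses:

# Hypothetical configuration snippets standing in for config['configurations']
# and config['clusterHostInfo'].
yarn_site = {'yarn.resourcemanager.webapp.address': 'rm.example.com:8088'}
cluster_host_info = {'rm_host': ['rm.example.com']}

rm_host = cluster_host_info['rm_host'][0]
rm_port = yarn_site['yarn.resourcemanager.webapp.address'].split(':')[-1]
rm_https_port = "8090"

rm_webui_address = "{0}:{1}".format(rm_host, rm_port)
rm_webui_https_address = "{0}:{1}".format(rm_host, rm_https_port)
print(rm_webui_address)        # rm.example.com:8088
print(rm_webui_https_address)  # rm.example.com:8090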

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/resourcemanager.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/resourcemanager.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/resourcemanager.py
deleted file mode 100644
index 0540670..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/resourcemanager.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-from service import service
-
-
-class Resourcemanager(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-    yarn()
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.configure(env) # FOR SECURITY
-    service('resourcemanager',
-            action='start'
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    service('resourcemanager',
-            action='stop'
-    )
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.resourcemanager_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-
-    yarn_user = params.yarn_user
-    conf_dir = params.config_dir
-    user_group = params.user_group
-
-    yarn_refresh_cmd = format("/usr/bin/yarn --config {conf_dir} rmadmin -refreshNodes")
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=yarn_user,
-         group=user_group
-    )
-
-    Execute(yarn_refresh_cmd,
-            user=yarn_user
-    )
-    pass
-
-
-if __name__ == "__main__":
-  Resourcemanager().execute()
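
As a rough illustration of what the decommission() method above produces
(assuming the jinja2 package is available; the NodeManager hosts are made up),
the exclude file content and the refresh command look like this:

from jinja2 import Template

# Body of exclude_hosts_list.j2 as it appears later in this diff.
template_body = "{% for host in exclude_hosts %}\n{{host}}\n{% endfor %}"
exclude_hosts = ['nm1.example.com', 'nm2.example.com']

# One decommissioned host per loop pass, separated by the template's newlines.
print(Template(template_body).render(exclude_hosts=exclude_hosts))

conf_dir = "/etc/hadoop/conf"
print("/usr/bin/yarn --config {0} rmadmin -refreshNodes".format(conf_dir))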

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service.py
deleted file mode 100644
index 441ef6c..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service.py
+++ /dev/null
@@ -1,65 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-
-def service(
-    name,
-    action='start'):
-
-  import params
-
-  if (name == 'historyserver'):
-    daemon = format("{mapred_bin}/mr-jobhistory-daemon.sh")
-    pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-{name}.pid")
-    usr = params.mapred_user
-  else:
-    daemon = format("{yarn_bin}/yarn-daemon.sh")
-    pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-{name}.pid")
-    usr = params.yarn_user
-
-  cmd = format("export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && {daemon} --config {config_dir}")
-
-  if action == 'start':
-    daemon_cmd = format("{cmd} start {name}")
-    no_op = format("ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            user=usr,
-            not_if=no_op
-    )
-
-    Execute(no_op,
-            user=usr,
-            not_if=no_op,
-            initial_wait=5
-    )
-
-  elif action == 'stop':
-    daemon_cmd = format("{cmd} stop {name}")
-    Execute(daemon_cmd,
-            user=usr,
-    )
-    rm_pid = format("rm -f {pid_file}")
-    Execute(rm_pid,
-            user=usr
-    )
\ No newline at end of file
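
A plain-Python sketch of the command and pid-file strings that service() above
assembles; the pid and configuration directories mirror defaults used elsewhere
in this stack and are assumptions here:

def build_start_cmd(name, yarn_user='yarn', mapred_user='mapred',
                    yarn_pid_dir='/var/run/hadoop-yarn/yarn',
                    mapred_pid_dir='/var/run/hadoop-mapreduce/mapred',
                    config_dir='/etc/hadoop/conf',
                    hadoop_libexec_dir='/usr/lib/hadoop/libexec'):
    # historyserver is driven by the MapReduce daemon script, everything else
    # (resourcemanager, nodemanager) by the YARN daemon script.
    if name == 'historyserver':
        daemon = '/usr/lib/hadoop-mapreduce/sbin/mr-jobhistory-daemon.sh'
        pid_file = '{0}/mapred-{1}-{2}.pid'.format(mapred_pid_dir, mapred_user, name)
    else:
        daemon = '/usr/lib/hadoop-yarn/sbin/yarn-daemon.sh'
        pid_file = '{0}/yarn-{1}-{2}.pid'.format(yarn_pid_dir, yarn_user, name)
    cmd = 'export HADOOP_LIBEXEC_DIR={0} && {1} --config {2}'.format(
        hadoop_libexec_dir, daemon, config_dir)
    return '{0} start {1}'.format(cmd, name), pid_file

print(build_start_cmd('resourcemanager'))
print(build_start_cmd('historyserver'))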

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service_check.py
deleted file mode 100644
index c53cc78..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/service_check.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    run_yarn_check_cmd = "/usr/bin/yarn node -list"
-
-    component_type = 'rm'
-    if params.hadoop_ssl_enabled:
-      component_address = params.rm_webui_https_address
-    else:
-      component_address = params.rm_webui_address
-
-    validateStatusFileName = "validateYarnComponentStatus.py"
-    validateStatusFilePath = format("/tmp/{validateStatusFileName}")
-
-    validateStatusCmd = format("{validateStatusFilePath} {component_type} -p {component_address} -s {hadoop_ssl_enabled}")
-
-    if params.security_enabled:
-      kinit_cmd = format("{kinit_path_local} -kt {smoke_user_keytab} {smokeuser};")
-      smoke_cmd = format("{kinit_cmd} {validateStatusCmd}")
-    else:
-      smoke_cmd = validateStatusCmd
-
-    File(validateStatusFilePath,
-         content=StaticFile(validateStatusFileName),
-         mode=0755
-    )
-
-    Execute(smoke_cmd,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            user=params.smokeuser,
-            logoutput=True
-    )
-
-    Execute(run_yarn_check_cmd,
-                  user=params.smokeuser
-    )
-
-if __name__ == "__main__":
-  ServiceCheck().execute()
\ No newline at end of file
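
A standalone sketch of the smoke command assembled above; the keytab path, the
smoke user and the ResourceManager address are placeholders rather than values
from a real cluster:

def build_smoke_cmd(security_enabled, hadoop_ssl_enabled=False,
                    component_address='rm.example.com:8088',
                    kinit_path='/usr/bin/kinit',
                    keytab='/etc/security/keytabs/smokeuser.headless.keytab',
                    smokeuser='ambari-qa'):
    # validateYarnComponentStatus.py is pushed to /tmp by the File resource above.
    validate_cmd = ('/tmp/validateYarnComponentStatus.py rm -p {0} -s {1}'
                    .format(component_address, hadoop_ssl_enabled))
    if security_enabled:
        return '{0} -kt {1} {2}; {3}'.format(kinit_path, keytab,
                                             smokeuser, validate_cmd)
    return validate_cmd

print(build_smoke_cmd(False))
print(build_smoke_cmd(True))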

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/status_params.py
deleted file mode 100644
index e554513..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/status_params.py
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-mapred_user = config['configurations']['global']['mapred_user']
-yarn_user = config['configurations']['global']['yarn_user']
-yarn_pid_dir_prefix = config['configurations']['global']['yarn_pid_dir_prefix']
-mapred_pid_dir_prefix = config['configurations']['global']['mapred_pid_dir_prefix']
-yarn_pid_dir = format("{yarn_pid_dir_prefix}/{yarn_user}")
-mapred_pid_dir = format("{mapred_pid_dir_prefix}/{mapred_user}")
-
-resourcemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-resourcemanager.pid")
-nodemanager_pid_file = format("{yarn_pid_dir}/yarn-{yarn_user}-nodemanager.pid")
-histroyserver_pid_file = format("{mapred_pid_dir}/mapred-{mapred_user}-historyserver.pid")
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn.py
deleted file mode 100644
index 1d97373..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn.py
+++ /dev/null
@@ -1,126 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-
-def yarn():
-  import params
-
-  Directory([params.yarn_pid_dir, params.yarn_log_dir],
-            owner=params.yarn_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  Directory([params.mapred_pid_dir, params.mapred_log_dir],
-            owner=params.mapred_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-  Directory([params.nm_local_dirs, params.nm_log_dirs, params.yarn_log_dir_prefix],
-            owner=params.yarn_user,
-            recursive=True
-  )
-
-  XmlConfig("core-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['core-site'],
-            owner=params.hdfs_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("mapred-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['mapred-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("yarn-site.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['yarn-site'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  XmlConfig("capacity-scheduler.xml",
-            conf_dir=params.config_dir,
-            configurations=params.config['configurations']['capacity-scheduler'],
-            owner=params.yarn_user,
-            group=params.user_group,
-            mode=0644
-  )
-
-  File(params.yarn_job_summary_log,
-       owner=params.yarn_user,
-       group=params.user_group
-  )
-
-  File(params.mapred_job_summary_log,
-       owner=params.mapred_user,
-       group=params.user_group
-  )
-
-  File(format("{limits_conf_dir}/yarn.conf"),
-       mode=0644,
-       content=Template('yarn.conf.j2')
-  )
-
-  File(format("{limits_conf_dir}/mapreduce.conf"),
-       mode=0644,
-       content=Template('mapreduce.conf.j2')
-  )
-
-  File(format("{config_dir}/yarn-env.sh"),
-       owner=params.yarn_user,
-       group=params.user_group,
-       mode=0755,
-       content=Template('yarn-env.sh.j2')
-  )
-
-  File(format("{config_dir}/hadoop-env.sh"),
-       owner=params.hdfs_user,
-       group=params.user_group,
-       mode=0755,
-       content=StaticFile(format('{hadoop_conf_dir}/hadoop-env.sh'))
-  )
-
-  if params.security_enabled:
-    container_executor = format("{yarn_container_bin}/container-executor")
-    File(container_executor,
-         group=params.yarn_executor_container_group,
-         mode=06050
-    )
-    
-    File(format("{config_dir}/container-executor.cfg"),
-         group=params.user_group,
-         mode=0644,
-         content=Template('container-executor.cfg.j2')
-    )
-
-
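
The XmlConfig resources above materialize the configuration dictionaries as
Hadoop *-site.xml files; a rough, self-contained approximation of that output
(property values are made up) is:

def to_hadoop_xml(properties):
    # Render a dict of name/value pairs in the usual Hadoop configuration layout.
    lines = ['<?xml version="1.0"?>', '<configuration>']
    for name, value in sorted(properties.items()):
        lines.append('  <property>')
        lines.append('    <name>{0}</name>'.format(name))
        lines.append('    <value>{0}</value>'.format(value))
        lines.append('  </property>')
    lines.append('</configuration>')
    return '\n'.join(lines)

print(to_hadoop_xml({'yarn.nodemanager.local-dirs': '/hadoop/yarn/local',
                     'yarn.nodemanager.log-dirs': '/hadoop/yarn/log'}))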

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn_client.py
deleted file mode 100644
index 7e9c564..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/scripts/yarn_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from yarn import yarn
-
-class YarnClient(Script):
-
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    yarn()
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  YarnClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/container-executor.cfg.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/container-executor.cfg.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/container-executor.cfg.j2
deleted file mode 100644
index 29ad949..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/container-executor.cfg.j2
+++ /dev/null
@@ -1,22 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-yarn.nodemanager.local-dirs={{nm_local_dirs}}
-yarn.nodemanager.log-dirs={{nm_log_dirs}}
-yarn.nodemanager.linux-container-executor.group={{yarn_executor_container_group}}
-banned.users = hdfs,yarn,mapred,bin
-min.user.id=1000

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index 4a4c698..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in exclude_hosts %}
-{{host}}
-{% endfor %}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/mapreduce.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/mapreduce.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/mapreduce.conf.j2
deleted file mode 100644
index 76caea4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/mapreduce.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{mapred_user}}   - nofile 32768
-{{mapred_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn-env.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn-env.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn-env.sh.j2
deleted file mode 100644
index 70bb71a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn-env.sh.j2
+++ /dev/null
@@ -1,119 +0,0 @@
-#/*
-# * Licensed to the Apache Software Foundation (ASF) under one
-# * or more contributor license agreements.  See the NOTICE file
-# * distributed with this work for additional information
-# * regarding copyright ownership.  The ASF licenses this file
-# * to you under the Apache License, Version 2.0 (the
-# * "License"); you may not use this file except in compliance
-# * with the License.  You may obtain a copy of the License at
-# *
-# *     http://www.apache.org/licenses/LICENSE-2.0
-# *
-# * Unless required by applicable law or agreed to in writing, software
-# * distributed under the License is distributed on an "AS IS" BASIS,
-# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# * See the License for the specific language governing permissions and
-# * limitations under the License.
-# */
-
-export HADOOP_YARN_HOME={{hadoop_yarn_home}}
-export YARN_LOG_DIR={{yarn_log_dir_prefix}}/$USER
-export YARN_PID_DIR={{yarn_pid_dir_prefix}}/$USER
-export HADOOP_LIBEXEC_DIR={{hadoop_libexec_dir}}
-export JAVA_HOME={{java64_home}}
-
-# User for YARN daemons
-export HADOOP_YARN_USER=${HADOOP_YARN_USER:-yarn}
-
-# resolve links - $0 may be a softlink
-export YARN_CONF_DIR="${YARN_CONF_DIR:-$HADOOP_YARN_HOME/conf}"
-
-# some Java parameters
-# export JAVA_HOME=/home/y/libexec/jdk1.6.0/
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
-fi
-
-JAVA=$JAVA_HOME/bin/java
-JAVA_HEAP_MAX=-Xmx1000m
-
-# For setting YARN specific HEAP sizes please use this
-# Parameter and set appropriately
-YARN_HEAPSIZE={{yarn_heapsize}}
-
-# check envvars which might override default args
-if [ "$YARN_HEAPSIZE" != "" ]; then
-  JAVA_HEAP_MAX="-Xmx""$YARN_HEAPSIZE""m"
-fi
-
-# Resource Manager specific parameters
-
-# Specify the max Heapsize for the ResourceManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_RESOURCEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_RESOURCEMANAGER_HEAPSIZE={{resourcemanager_heapsize}}
-
-# Specify the JVM options to be used when starting the ResourceManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_RESOURCEMANAGER_OPTS=
-
-# Node Manager specific parameters
-
-# Specify the max Heapsize for the NodeManager using a numerical value
-# in the scale of MB. For example, to specify a JVM option of -Xmx1000m, set
-# the value to 1000.
-# This value will be overridden by an Xmx setting specified in either YARN_OPTS
-# and/or YARN_NODEMANAGER_OPTS.
-# If not specified, the default value will be picked from either YARN_HEAPMAX
-# or JAVA_HEAP_MAX with YARN_HEAPMAX as the preferred option of the two.
-export YARN_NODEMANAGER_HEAPSIZE={{nodemanager_heapsize}}
-
-# Specify the JVM options to be used when starting the NodeManager.
-# These options will be appended to the options specified as YARN_OPTS
-# and therefore may override any similar flags set in YARN_OPTS
-#export YARN_NODEMANAGER_OPTS=
-
-# so that filenames w/ spaces are handled correctly in loops below
-IFS=
-
-
-# default log directory & file
-if [ "$YARN_LOG_DIR" = "" ]; then
-  YARN_LOG_DIR="$HADOOP_YARN_HOME/logs"
-fi
-if [ "$YARN_LOGFILE" = "" ]; then
-  YARN_LOGFILE='yarn.log'
-fi
-
-# default policy file for service-level authorization
-if [ "$YARN_POLICYFILE" = "" ]; then
-  YARN_POLICYFILE="hadoop-policy.xml"
-fi
-
-# restore ordinary behaviour
-unset IFS
-
-
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.dir=$YARN_LOG_DIR"
-YARN_OPTS="$YARN_OPTS -Dhadoop.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.log.file=$YARN_LOGFILE"
-YARN_OPTS="$YARN_OPTS -Dyarn.home.dir=$YARN_COMMON_HOME"
-YARN_OPTS="$YARN_OPTS -Dyarn.id.str=$YARN_IDENT_STRING"
-YARN_OPTS="$YARN_OPTS -Dhadoop.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-YARN_OPTS="$YARN_OPTS -Dyarn.root.logger=${YARN_ROOT_LOGGER:-INFO,console}"
-if [ "x$JAVA_LIBRARY_PATH" != "x" ]; then
-  YARN_OPTS="$YARN_OPTS -Djava.library.path=$JAVA_LIBRARY_PATH"
-fi
-YARN_OPTS="$YARN_OPTS -Dyarn.policy.file=$YARN_POLICYFILE"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn.conf.j2
deleted file mode 100644
index be89b07..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/YARN/package/templates/yarn.conf.j2
+++ /dev/null
@@ -1,17 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-{{yarn_user}}   - nofile 32768
-{{yarn_user}}   - nproc  65536

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/configuration/global.xml
deleted file mode 100644
index f78df89..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/configuration/global.xml
+++ /dev/null
@@ -1,75 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
--->
-
-<configuration>
-  <property>
-    <name>zk_user</name>
-    <value>zookeeper</value>
-    <description>ZooKeeper User.</description>
-  </property>
-  <property>
-    <name>zookeeperserver_host</name>
-    <value></value>
-    <description>ZooKeeper Server Hosts.</description>
-  </property>
-  <property>
-    <name>zk_data_dir</name>
-    <value>/hadoop/zookeeper</value>
-    <description>Data directory for ZooKeeper.</description>
-  </property>
-  <property>
-    <name>zk_log_dir</name>
-    <value>/var/log/zookeeper</value>
-    <description>ZooKeeper Log Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_dir</name>
-    <value>/var/run/zookeeper</value>
-    <description>ZooKeeper Pid Dir</description>
-  </property>
-  <property>
-    <name>zk_pid_file</name>
-    <value>/var/run/zookeeper/zookeeper_server.pid</value>
-    <description>ZooKeeper Pid File</description>
-  </property>
-  <property>
-    <name>tickTime</name>
-    <value>2000</value>
-    <description>The length of a single tick in milliseconds, which is the basic time unit used by ZooKeeper</description>
-  </property>
-  <property>
-    <name>initLimit</name>
-    <value>10</value>
-    <description>Ticks to allow for sync at Init.</description>
-  </property>
-  <property>
-    <name>syncLimit</name>
-    <value>5</value>
-    <description>Ticks to allow for sync at Runtime.</description>
-  </property>
-  <property>
-    <name>clientPort</name>
-    <value>2181</value>
-    <description>Port for running ZK Server.</description>
-  </property>
-
-</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/metainfo.xml
deleted file mode 100644
index ee5b3e0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/metainfo.xml
+++ /dev/null
@@ -1,70 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>ZOOKEEPER</name>
-      <comment>Centralized service which provides highly reliable distributed coordination</comment>
-      <version>3.4.5.2.0.6.0</version>
-      <components>
-
-        <component>
-          <name>ZOOKEEPER_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/zookeeper_server.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>ZOOKEEPER_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/zookeeper_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>zookeeper</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>global</config-type>
-      </configuration-dependencies>
-    </service>
-  </services>
-</metainfo>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkEnv.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkEnv.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkEnv.sh
deleted file mode 100644
index 07017e1..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkEnv.sh
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# This script should be sourced into other zookeeper
-# scripts to setup the env variables
-
-# We use ZOOCFGDIR if defined,
-# otherwise we use /etc/zookeeper
-# or the conf directory that is
-# a sibling of this script's directory
-if [ "x$ZOOCFGDIR" = "x" ]
-then
-    if [ -d "/etc/zookeeper" ]
-    then
-        ZOOCFGDIR="/etc/zookeeper"
-    else
-        ZOOCFGDIR="$ZOOBINDIR/../conf"
-    fi
-fi
-
-if [ "x$ZOOCFG" = "x" ]
-then
-    ZOOCFG="zoo.cfg"
-fi
-
-ZOOCFG="$ZOOCFGDIR/$ZOOCFG"
-
-if [ -e "$ZOOCFGDIR/zookeeper-env.sh" ]
-then
-    . "$ZOOCFGDIR/zookeeper-env.sh"
-fi
-
-if [ "x${ZOO_LOG_DIR}" = "x" ]
-then
-    ZOO_LOG_DIR="."
-fi
-
-if [ "x${ZOO_LOG4J_PROP}" = "x" ]
-then
-    ZOO_LOG4J_PROP="INFO,CONSOLE"
-fi
-
-#add the zoocfg dir to classpath
-CLASSPATH="$ZOOCFGDIR:$CLASSPATH"
-
-for i in "$ZOOBINDIR"/../src/java/lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../lib/*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work in the release
-for i in "$ZOOBINDIR"/../zookeeper-*.jar
-do
-    CLASSPATH="$i:$CLASSPATH"
-done
-
-#make it work for developers
-for d in "$ZOOBINDIR"/../build/lib/*.jar
-do
-   CLASSPATH="$d:$CLASSPATH"
-done
-
-#make it work for developers
-CLASSPATH="$ZOOBINDIR/../build/classes:$CLASSPATH"
-
-case "`uname`" in
-    CYGWIN*) cygwin=true ;;
-    *) cygwin=false ;;
-esac
-
-if $cygwin
-then
-    CLASSPATH=`cygpath -wp "$CLASSPATH"`
-fi
-
-#echo "CLASSPATH=$CLASSPATH"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkServer.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkServer.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkServer.sh
deleted file mode 100644
index 49ceb4d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkServer.sh
+++ /dev/null
@@ -1,120 +0,0 @@
-#!/bin/sh
-
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-#
-# If this script is run out of /usr/bin or some other system bin directory
-# it should be linked to and not copied. Things like java jar files are found
-# relative to the canonical path of this script.
-#
-
-# See the following page for extensive details on setting
-# up the JVM to accept JMX remote management:
-# http://java.sun.com/javase/6/docs/technotes/guides/management/agent.html
-# by default we allow local JMX connections
-if [ "x$JMXLOCALONLY" = "x" ]
-then
-    JMXLOCALONLY=false
-fi
-
-if [ "x$JMXDISABLE" = "x" ]
-then
-    echo "JMX enabled by default"
-    # for some reason these two options are necessary on jdk6 on Ubuntu
-    #   according to the docs they are not necessary, but otherwise jconsole cannot
-    #   do a local attach
-    ZOOMAIN="-Dcom.sun.management.jmxremote -Dcom.sun.management.jmxremote.local.only=$JMXLOCALONLY org.apache.zookeeper.server.quorum.QuorumPeerMain"
-else
-    echo "JMX disabled by user request"
-    ZOOMAIN="org.apache.zookeeper.server.quorum.QuorumPeerMain"
-fi
-
-# Only follow symlinks if readlink supports it
-if readlink -f "$0" > /dev/null 2>&1
-then
-  ZOOBIN=`readlink -f "$0"`
-else
-  ZOOBIN="$0"
-fi
-ZOOBINDIR=`dirname "$ZOOBIN"`
-
-. "$ZOOBINDIR"/zkEnv.sh
-
-if [ "x$2" != "x" ]
-then
-    ZOOCFG="$ZOOCFGDIR/$2"
-fi
-
-if $cygwin
-then
-    ZOOCFG=`cygpath -wp "$ZOOCFG"`
-    # cygwin has a "kill" in the shell itself, gets confused
-    KILL=/bin/kill
-else
-    KILL=kill
-fi
-
-echo "Using config: $ZOOCFG"
-
-ZOOPIDFILE=$(grep dataDir "$ZOOCFG" | sed -e 's/.*=//')/zookeeper_server.pid
-
-
-case $1 in
-start)
-    echo  "Starting zookeeper ... "
-    $JAVA  "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS $ZOOMAIN "$ZOOCFG" &
-    /bin/echo -n $! > "$ZOOPIDFILE"
-    echo STARTED
-    ;;
-stop)
-    echo "Stopping zookeeper ... "
-    if [ ! -f "$ZOOPIDFILE" ]
-    then
-    echo "error: could not find file $ZOOPIDFILE"
-    exit 1
-    else
-    $KILL -9 $(cat "$ZOOPIDFILE")
-    rm "$ZOOPIDFILE"
-    echo STOPPED
-    fi
-    ;;
-upgrade)
-    shift
-    echo "upgrading the servers to 3.*"
-    java "-Dzookeeper.log.dir=${ZOO_LOG_DIR}" "-Dzookeeper.root.logger=${ZOO_LOG4J_PROP}" \
-    -cp "$CLASSPATH" $JVMFLAGS org.apache.zookeeper.server.upgrade.UpgradeMain ${@}
-    echo "Upgrading ... "
-    ;;
-restart)
-    shift
-    "$0" stop ${@}
-    sleep 3
-    "$0" start ${@}
-    ;;
-status)
-    STAT=`echo stat | nc localhost $(grep clientPort "$ZOOCFG" | sed -e 's/.*=//') 2> /dev/null| grep Mode`
-    if [ "x$STAT" = "x" ]
-    then
-        echo "Error contacting service. It is probably not running."
-    else
-        echo $STAT
-    fi
-    ;;
-*)
-    echo "Usage: $0 {start|stop|restart|status}" >&2
-
-esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkService.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkService.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkService.sh
deleted file mode 100644
index 32dfce4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkService.sh
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-zkcli_script=$1
-user=$2
-conf_dir=$3
-su - $user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | $zkcli_script"

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkSmoke.sh
deleted file mode 100644
index c1c11b4..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/files/zkSmoke.sh
+++ /dev/null
@@ -1,78 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smoke_script=$1
-smoke_user=$2
-conf_dir=$3
-client_port=$4
-security_enabled=$5
-kinit_path_local=$6
-smoke_user_keytab=$7
-export ZOOKEEPER_EXIT_CODE=0
-test_output_file=/tmp/zkSmoke.out
-errors_expr="ERROR|Exception"
-acceptable_expr="SecurityException"
-zkhosts=` grep "^server\.[[:digit:]]"  $conf_dir/zoo.cfg  | cut -f 2 -d '=' | cut -f 1 -d ':' | tr '\n' ' ' `
-zk_node1=`echo $zkhosts | tr ' ' '\n' | head -n 1`  
-echo "zk_node1=$zk_node1"
-if [[ $security_enabled == "True" ]]; then
-  kinitcmd="$kinit_path_local -kt $smoke_user_keytab $smoke_user"
-  su - $smoke_user -c "$kinitcmd"
-fi
-
-function verify_output() {
-  if [ -f $test_output_file ]; then
-    errors=`grep -E $errors_expr $test_output_file | grep -v $acceptable_expr`
-    if [ "$?" -eq 0 ]; then
-      echo "Error found in the zookeeper smoke test. Exiting."
-      echo $errors
-      exit 1
-    fi
-  fi
-}
-
-# Delete /zk_smoketest znode if exists
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ;  echo delete /zk_smoketest | ${smoke_script} -server $zk_node1:$client_port" 2>&1>$test_output_file
-# Create /zk_smoketest znode on one zookeeper server
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo create /zk_smoketest smoke_data | ${smoke_script} -server $zk_node1:$client_port" 2>&1>>$test_output_file
-verify_output
-
-for i in $zkhosts ; do
-  echo "Running test on host $i"
-  # Verify the data associated with znode across all the nodes in the zookeeper quorum
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port"
-  su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'ls /' | ${smoke_script} -server $i:$client_port"
-  output=$(su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'get /zk_smoketest' | ${smoke_script} -server $i:$client_port")
-  echo $output | grep smoke_data
-  if [[ $? -ne 0 ]] ; then
-    echo "Data associated with znode /zk_smoketests is not consistent on host $i"
-    ((ZOOKEEPER_EXIT_CODE=$ZOOKEEPER_EXIT_CODE+1))
-  fi
-done
-
-su - $smoke_user -c "source $conf_dir/zookeeper-env.sh ; echo 'delete /zk_smoketest' | ${smoke_script} -server $zk_node1:$client_port"
-if [[ "$ZOOKEEPER_EXIT_CODE" -ne "0" ]] ; then
-  echo "Zookeeper Smoke Test: Failed" 
-else
-   echo "Zookeeper Smoke Test: Passed" 
-fi
-exit $ZOOKEEPER_EXIT_CODE

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/__init__.py
deleted file mode 100644
index a582077..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/__init__.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/params.py
deleted file mode 100644
index 9acc0c9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/params.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import status_params
-
-# server configurations
-config = Script.get_config()
-
-config_dir = "/etc/zookeeper/conf"
-zk_user =  config['configurations']['global']['zk_user']
-hostname = config['hostname']
-zk_bin = '/usr/lib/zookeeper/bin'
-user_group = config['configurations']['global']['user_group']
-
-smoke_script = "/usr/lib/zookeeper/bin/zkCli.sh"
-
-zk_log_dir = config['configurations']['global']['zk_log_dir']
-zk_data_dir = config['configurations']['global']['zk_data_dir']
-zk_pid_dir = status_params.zk_pid_dir
-zk_pid_file = status_params.zk_pid_file
-zk_server_heapsize = "-Xmx1024m"
-
-tickTime = config['configurations']['global']['tickTime']
-initLimit = config['configurations']['global']['initLimit']
-syncLimit = config['configurations']['global']['syncLimit']
-clientPort = config['configurations']['global']['clientPort']
-
-if 'zoo.cfg' in config['configurations']:
-  zoo_cfg_properties_map = config['configurations']['zoo.cfg']
-else:
-  zoo_cfg_properties_map = {}
-zoo_cfg_properties_map_length = len(zoo_cfg_properties_map)
-
-zk_primary_name = "zookeeper"
-zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
-zk_principal = zk_principal_name.replace('_HOST',hostname)
-
-java64_home = config['hostLevelParams']['java_home']
-
-zookeeper_hosts = config['clusterHostInfo']['zookeeper_hosts']
-zookeeper_hosts.sort()
-
-keytab_path = "/etc/security/keytabs"
-zk_keytab_path = format("{keytab_path}/zk.service.keytab")
-zk_server_jaas_file = format("{config_dir}/zookeeper_jaas.conf")
-zk_client_jaas_file = format("{config_dir}/zookeeper_client_jaas.conf")
-security_enabled = config['configurations']['global']['security_enabled']
-
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-smokeuser = config['configurations']['global']['smokeuser']
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
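
A minimal sketch of the _HOST substitution performed above; the hostname is a
placeholder:

zk_principal_name = "zookeeper/_HOST@EXAMPLE.COM"
hostname = "zk1.example.com"

zk_principal = zk_principal_name.replace('_HOST', hostname)
print(zk_principal)   # zookeeper/zk1.example.com@EXAMPLE.COM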

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/service_check.py
deleted file mode 100644
index 6b3553d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/service_check.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-class ZookeeperServiceCheck(Script):
-  def service_check(self, env):
-    import params
-    env.set_params(params)
-
-    File("/tmp/zkSmoke.sh",
-         mode=0755,
-         content=StaticFile('zkSmoke.sh')
-    )
-
-    cmd_quorum = format("sh /tmp/zkSmoke.sh {smoke_script} {smokeuser} {config_dir} {clientPort} "
-                  "{security_enabled} {kinit_path_local} {smokeUserKeytab}",
-                  smokeUserKeytab=params.smoke_user_keytab if params.security_enabled else "no_keytab")
-
-    Execute(cmd_quorum,
-            tries=3,
-            try_sleep=5,
-            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
-            logoutput=True
-    )
-
-if __name__ == "__main__":
-  ZookeeperServiceCheck().execute()
\ No newline at end of file
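
A standalone sketch of the command string assembled above (the keytab path and
smoke user are placeholders; the script, config dir and client port follow the
ZooKeeper params.py and global.xml shown earlier); with security disabled the
keytab argument collapses to the literal no_keytab:

def zk_smoke_cmd(security_enabled,
                 smoke_script='/usr/lib/zookeeper/bin/zkCli.sh',
                 smokeuser='ambari-qa',
                 config_dir='/etc/zookeeper/conf',
                 client_port='2181',
                 kinit_path='/usr/bin/kinit',
                 keytab='/etc/security/keytabs/smokeuser.headless.keytab'):
    return ('sh /tmp/zkSmoke.sh {0} {1} {2} {3} {4} {5} {6}'
            .format(smoke_script, smokeuser, config_dir, client_port,
                    security_enabled, kinit_path,
                    keytab if security_enabled else 'no_keytab'))

print(zk_smoke_cmd(False))
print(zk_smoke_cmd(True))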

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/status_params.py
deleted file mode 100644
index 98f2903..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/status_params.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-zk_pid_dir = config['configurations']['global']['zk_pid_dir']
-zk_pid_file = format("{zk_pid_dir}/zookeeper_server.pid")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper.py
deleted file mode 100644
index c49eb22..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper.py
+++ /dev/null
@@ -1,92 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-import sys
-
-
-def zookeeper(type = None):
-  import params
-
-  Directory(params.config_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  configFile("zoo.cfg", template_name="zoo.cfg.j2")
-  configFile("zookeeper-env.sh", template_name="zookeeper-env.sh.j2")
-  configFile("configuration.xsl", template_name="configuration.xsl.j2")
-
-  Directory(params.zk_pid_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_log_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  Directory(params.zk_data_dir,
-            owner=params.zk_user,
-            recursive=True,
-            group=params.user_group
-  )
-
-  if type == 'server':
-    myid = str(sorted(params.zookeeper_hosts).index(params.hostname) + 1)
-
-    File(format("{zk_data_dir}/myid"),
-         mode = 0644,
-         content = myid
-    )
-
-  configFile("log4j.properties", template_name="log4j.properties.j2")
-
-  if params.security_enabled:
-    if type == "server":
-      configFile("zookeeper_jaas.conf", template_name="zookeeper_jaas.conf.j2")
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-    else:
-      configFile("zookeeper_client_jaas.conf", template_name="zookeeper_client_jaas.conf.j2")
-
-  File(format("{config_dir}/zoo_sample.cfg"),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-def configFile(name, template_name=None):
-  import params
-
-  File(format("{config_dir}/{name}"),
-       content=Template(template_name),
-       owner=params.zk_user,
-       group=params.user_group
-  )
-
-
-
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_client.py
deleted file mode 100644
index 028a37d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-
-class ZookeeperClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-    env.set_params(params)
-
-    zookeeper(type='client')
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-if __name__ == "__main__":
-  ZookeeperClient().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_server.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_server.py
deleted file mode 100644
index e8cc264..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_server.py
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-import sys
-from resource_management import *
-
-from zookeeper import zookeeper
-from zookeeper_service import zookeeper_service
-
-class ZookeeperServer(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-  def configure(self, env):
-    import params
-    env.set_params(params)
-    zookeeper(type='server')
-
-  def start(self, env):
-    import params
-    env.set_params(params)
-    self.configure(env)
-    zookeeper_service(action = 'start')
-
-  def stop(self, env):
-    import params
-    env.set_params(params)
-    zookeeper_service(action = 'stop')
-
-  def status(self, env):
-    import status_params
-    env.set_params(status_params)
-    check_process_status(status_params.zk_pid_file)
-
-if __name__ == "__main__":
-  ZookeeperServer().execute()
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_service.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_service.py
deleted file mode 100644
index 83b8f08..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/scripts/zookeeper_service.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python2.6
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-Ambari Agent
-
-"""
-
-from resource_management import *
-
-def zookeeper_service(action='start'):
-  import params
-
-  cmd = format("env ZOOCFGDIR={config_dir} ZOOCFG=zoo.cfg {zk_bin}/zkServer.sh")
-
-  if action == 'start':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} start")
-    no_op_test = format("ls {zk_pid_file} >/dev/null 2>&1 && ps `cat {zk_pid_file}` >/dev/null 2>&1")
-    Execute(daemon_cmd,
-            not_if=no_op_test,
-            user=params.zk_user
-    )
-  elif action == 'stop':
-    daemon_cmd = format("source {config_dir}/zookeeper-env.sh ; {cmd} stop")
-    rm_pid = format("rm -f {zk_pid_file}")
-    Execute(daemon_cmd,
-            user=params.zk_user
-    )
-    Execute(rm_pid)
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/configuration.xsl.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/configuration.xsl.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/configuration.xsl.j2
deleted file mode 100644
index c003ba2..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/ZOOKEEPER/package/templates/configuration.xsl.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<html>
-<body>
-<table border="1">
-<tr>
- <td>name</td>
- <td>value</td>
- <td>description</td>
-</tr>
-<xsl:for-each select="property">
-  <tr>
-     <td><a name="{name}"><xsl:value-of select="name"/></a></td>
-     <td><xsl:value-of select="value"/></td>
-     <td><xsl:value-of select="description"/></td>
-  </tr>
-</xsl:for-each>
-</table>
-</body>
-</html>
-</xsl:template>
-</xsl:stylesheet>


[13/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh
new file mode 100644
index 0000000..8b7c257
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/rrdcachedLib.sh
@@ -0,0 +1,47 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants etc.
+source ./gangliaLib.sh;
+
+RRDCACHED_BIN=/usr/bin/rrdcached;
+RRDCACHED_PID_FILE=${GANGLIA_RUNTIME_DIR}/rrdcached.pid;
+RRDCACHED_ALL_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.sock;
+RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET=${GANGLIA_RUNTIME_DIR}/rrdcached.limited.sock;
+
+function getRrdcachedLoggedPid()
+{
+    if [ -e "${RRDCACHED_PID_FILE}" ]
+    then
+        echo `cat ${RRDCACHED_PID_FILE}`;
+    fi
+}
+
+function getRrdcachedRunningPid()
+{
+    rrdcachedLoggedPid=`getRrdcachedLoggedPid`;
+
+    if [ -n "${rrdcachedLoggedPid}" ]
+    then
+        echo `ps -o pid=MYPID -p ${rrdcachedLoggedPid} | tail -1 | awk '{print $1}' | grep -v MYPID`;
+    fi
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
new file mode 100644
index 0000000..5145b9c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/setupGanglia.sh
@@ -0,0 +1,141 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh
+
+function usage()
+{
+  cat << END_USAGE
+Usage: ${0} [-c <gmondClusterName> [-m]] [-t] [-o <owner>] [-g <group>]
+
+Options:
+  -c <gmondClusterName>   The name of the Ganglia Cluster whose gmond configuration we're here to generate.
+
+  -m                      Whether this gmond (if -t is not specified) is the master for its Ganglia 
+                          Cluster. Without this, we generate slave gmond configuration.
+
+  -t                      Whether this is a call to generate gmetad configuration (as opposed to the
+                          gmond configuration that is generated without this).
+  -o <owner>              Owner
+  -g <group>              Group
+END_USAGE
+}
+
+function instantiateGmetadConf()
+{
+  # gmetad utility library.
+  source ./gmetadLib.sh;
+
+  generateGmetadConf > ${GMETAD_CONF_FILE};
+}
+
+function instantiateGmondConf()
+{
+  # gmond utility library.
+  source ./gmondLib.sh;
+ 
+  gmondClusterName=${1};
+
+  if [ "x" != "x${gmondClusterName}" ]
+  then
+
+    createDirectory "${GANGLIA_RUNTIME_DIR}/${gmondClusterName}";
+    createDirectory "${GANGLIA_CONF_DIR}/${gmondClusterName}/conf.d";
+    
+    # Always blindly generate the core gmond config - that goes on every box running gmond. 
+    generateGmondCoreConf ${gmondClusterName} > `getGmondCoreConfFileName ${gmondClusterName}`;
+
+    isMasterGmond=${2};
+
+    # Decide whether we want to add on the master or slave gmond config.
+    if [ "0" -eq "${isMasterGmond}" ]
+    then
+      generateGmondSlaveConf ${gmondClusterName} > `getGmondSlaveConfFileName ${gmondClusterName}`;
+    else
+      generateGmondMasterConf ${gmondClusterName} > `getGmondMasterConfFileName ${gmondClusterName}`;
+    fi
+
+    chown -R ${3}:${4} ${GANGLIA_CONF_DIR}/${gmondClusterName}
+
+  else
+    echo "No gmondClusterName passed in, nothing to instantiate";
+  fi
+}
+
+# main()
+
+gmondClusterName=;
+isMasterGmond=0;
+configureGmetad=0;
+owner='root';
+group='root';
+
+while getopts ":c:mto:g:" OPTION
+do
+  case ${OPTION} in
+    c) 
+      gmondClusterName=${OPTARG};
+      ;;
+    m)
+      isMasterGmond=1;
+      ;;
+    t)
+      configureGmetad=1;
+      ;;
+    o)
+      owner=${OPTARG};
+      ;;
+    g)
+      group=${OPTARG};
+      ;;
+    ?)
+      usage;
+      exit 1;
+  esac
+done
+
+# Initialization.
+createDirectory ${GANGLIA_CONF_DIR};
+createDirectory ${GANGLIA_RUNTIME_DIR};
+# So rrdcached can drop its PID files in here.
+chmod a+w ${GANGLIA_RUNTIME_DIR};
+chown ${owner}:${group} ${GANGLIA_CONF_DIR};
+
+if [ -n "${gmondClusterName}" ]
+then
+
+  # Be forgiving of users who pass in -c along with -t (which always takes precedence).
+  if [ "1" -eq "${configureGmetad}" ]
+  then
+    instantiateGmetadConf;
+  else
+    instantiateGmondConf ${gmondClusterName} ${isMasterGmond} ${owner} ${group};
+  fi
+
+elif [ "1" -eq "${configureGmetad}" ]
+then
+  instantiateGmetadConf;
+else
+  usage;
+  exit 2;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
new file mode 100644
index 0000000..ab5102d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmetad.sh
@@ -0,0 +1,64 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+# To get access to ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET}.
+source ./rrdcachedLib.sh;
+
+# Before starting gmetad, start rrdcached.
+./startRrdcached.sh;
+
+if [ $? -eq 0 ] 
+then
+    gmetadRunningPid=`getGmetadRunningPid`;
+
+    # Only attempt to start gmetad if there's not already one running.
+    if [ -z "${gmetadRunningPid}" ]
+    then
+        env RRDCACHED_ADDRESS=${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+                    ${GMETAD_BIN} --conf=${GMETAD_CONF_FILE} --pid-file=${GMETAD_PID_FILE};
+
+        for i in `seq 0 5`; do
+          gmetadRunningPid=`getGmetadRunningPid`;
+          if [ -n "${gmetadRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+
+        if [ -n "${gmetadRunningPid}" ]
+        then
+            echo "Started ${GMETAD_BIN} with PID ${gmetadRunningPid}";
+        else
+            echo "Failed to start ${GMETAD_BIN}";
+            exit 1;
+        fi
+    else
+        echo "${GMETAD_BIN} already running with PID ${gmetadRunningPid}";
+    fi
+else
+    echo "Not starting ${GMETAD_BIN} because starting ${RRDCACHED_BIN} failed.";
+    exit 2;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
new file mode 100644
index 0000000..239b62e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startGmond.sh
@@ -0,0 +1,80 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function startGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only attempt to start gmond if there's not already one running.
+    if [ -z "${gmondRunningPid}" ]
+    then
+      gmondCoreConfFileName=`getGmondCoreConfFileName ${gmondClusterName}`;
+
+      if [ -e "${gmondCoreConfFileName}" ]
+      then 
+        gmondPidFileName=`getGmondPidFileName ${gmondClusterName}`;
+
+        ${GMOND_BIN} --conf=${gmondCoreConfFileName} --pid-file=${gmondPidFileName};
+
+        for i in `seq 0 5`; do
+          gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+          if [ -n "${gmondRunningPid}" ]
+          then
+            break;
+          fi
+          sleep 1;
+        done
+  
+        if [ -n "${gmondRunningPid}" ]
+        then
+            echo "Started ${GMOND_BIN} for cluster ${gmondClusterName} with PID ${gmondRunningPid}";
+        else
+            echo "Failed to start ${GMOND_BIN} for cluster ${gmondClusterName}";
+            exit 1;
+        fi
+      fi 
+    else
+      echo "${GMOND_BIN} for cluster ${gmondClusterName} already running with PID ${gmondRunningPid}";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so start 
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        startGmondForCluster ${gmondClusterName};
+    done
+else
+    # Just start the one ${gmondClusterName} that was asked for.
+    startGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
new file mode 100644
index 0000000..e79472b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/startRrdcached.sh
@@ -0,0 +1,69 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Slurp in all our user-customizable settings.
+source ./gangliaEnv.sh;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only attempt to start rrdcached if there's not already one running.
+if [ -z "${rrdcachedRunningPid}" ]
+then
+    # Changed because of a problem Puppet had with the nobody user:
+    #sudo -u ${GMETAD_USER} ${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+    #         -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+    #         -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+    #         -b /var/lib/ganglia/rrds -B
+    su - ${GMETAD_USER} -c "${RRDCACHED_BIN} -p ${RRDCACHED_PID_FILE} \
+             -m 664 -l unix:${RRDCACHED_ALL_ACCESS_UNIX_SOCKET} \
+             -m 777 -P FLUSH,STATS,HELP -l unix:${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET} \
+             -b ${RRDCACHED_BASE_DIR} -B"
+
+    # Ideally, we'd use ${RRDCACHED_BIN}'s -s ${WEBSERVER_GROUP} option for
+    # this, but it sometimes doesn't take effect due to a lack of permissions,
+    # so perform the operation explicitly to be super-sure.
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_ALL_ACCESS_UNIX_SOCKET};
+    chgrp ${WEBSERVER_GROUP} ${RRDCACHED_LIMITED_ACCESS_UNIX_SOCKET};
+
+    # Check to make sure rrdcached actually started up.
+    for i in `seq 0 5`; do
+      rrdcachedRunningPid=`getRrdcachedRunningPid`;
+      if [ -n "${rrdcachedRunningPid}" ]
+        then
+          break;
+      fi
+      sleep 1;
+    done
+
+    if [ -n "${rrdcachedRunningPid}" ]
+    then
+        echo "Started ${RRDCACHED_BIN} with PID ${rrdcachedRunningPid}";
+    else
+        echo "Failed to start ${RRDCACHED_BIN}";
+        exit 1;
+    fi
+else
+    echo "${RRDCACHED_BIN} already running with PID ${rrdcachedRunningPid}";
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
new file mode 100644
index 0000000..2764e0e
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmetad.sh
@@ -0,0 +1,43 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./gmetadLib.sh;
+
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${gmetadRunningPid}" ]
+then
+    kill -KILL ${gmetadRunningPid};
+    echo "Stopped ${GMETAD_BIN} (with PID ${gmetadRunningPid})";
+fi
+
+# Poll again.
+gmetadRunningPid=`getGmetadRunningPid`;
+
+# Once we've killed gmetad, there should no longer be a running PID.
+if [ -z "${gmetadRunningPid}" ]
+then
+    # It's safe to stop rrdcached now.
+    ./stopRrdcached.sh;
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
new file mode 100644
index 0000000..1af3eb9
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopGmond.sh
@@ -0,0 +1,54 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+# Pulls in gangliaLib.sh as well, so we can skip pulling it in again.
+source ./gmondLib.sh;
+
+function stopGmondForCluster()
+{
+    gmondClusterName=${1};
+
+    gmondRunningPid=`getGmondRunningPid ${gmondClusterName}`;
+
+    # Only go ahead with the termination if we could find a running PID.
+    if [ -n "${gmondRunningPid}" ]
+    then
+      kill -KILL ${gmondRunningPid};
+      echo "Stopped ${GMOND_BIN} for cluster ${gmondClusterName} (with PID ${gmondRunningPid})";
+    fi
+}
+
+# main()
+gmondClusterName=${1};
+
+if [ "x" == "x${gmondClusterName}" ]
+then
+    # No ${gmondClusterName} passed in as command-line arg, so stop
+    # all the gmonds we know about.
+    for gmondClusterName in `getConfiguredGangliaClusterNames`
+    do
+        stopGmondForCluster ${gmondClusterName};
+    done
+else
+    stopGmondForCluster ${gmondClusterName};
+fi

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
new file mode 100644
index 0000000..0a0d8d8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/stopRrdcached.sh
@@ -0,0 +1,41 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get all our common constants etc. set up.
+source ./rrdcachedLib.sh;
+
+rrdcachedRunningPid=`getRrdcachedRunningPid`;
+
+# Only go ahead with the termination if we could find a running PID.
+if [ -n "${rrdcachedRunningPid}" ]
+then
+    kill -TERM ${rrdcachedRunningPid};
+    # ${RRDCACHED_BIN} takes a few seconds to drain its buffers, so wait 
+    # until we're sure it's well and truly dead. 
+    #
+    # Without this, an immediately following startRrdcached.sh won't do
+    # anything, because it still sees this soon-to-die instance alive,
+    # and the net result is that after a few seconds, there's no
+    # ${RRDCACHED_BIN} running on the box anymore.
+    sleep 5;
+    echo "Stopped ${RRDCACHED_BIN} (with PID ${rrdcachedRunningPid})";
+fi 

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
new file mode 100644
index 0000000..b27f7a2
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/files/teardownGanglia.sh
@@ -0,0 +1,28 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+# Get access to Ganglia-wide constants, utilities etc.
+source ./gangliaLib.sh;
+
+# Undo what we did while setting up Ganglia on this box.
+rm -rf ${GANGLIA_CONF_DIR};
+rm -rf ${GANGLIA_RUNTIME_DIR};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
new file mode 100644
index 0000000..75626b1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia.py
@@ -0,0 +1,97 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+import os
+
+
+def groups_and_users():
+  import params
+
+def config():
+  import params
+
+  shell_cmds_dir = params.ganglia_shell_cmds_dir
+  shell_files = ['checkGmond.sh', 'checkRrdcached.sh', 'gmetadLib.sh',
+                 'gmondLib.sh', 'rrdcachedLib.sh',
+                 'setupGanglia.sh', 'startGmetad.sh', 'startGmond.sh',
+                 'startRrdcached.sh', 'stopGmetad.sh',
+                 'stopGmond.sh', 'stopRrdcached.sh', 'teardownGanglia.sh']
+  Directory(shell_cmds_dir,
+            owner="root",
+            group="root",
+            recursive=True
+  )
+  init_file("gmetad")
+  init_file("gmond")
+  for sh_file in shell_files:
+    shell_file(sh_file)
+  for conf_file in ['gangliaClusters.conf', 'gangliaEnv.sh', 'gangliaLib.sh']:
+    ganglia_TemplateConfig(conf_file)
+
+
+def init_file(name):
+  import params
+
+  File("/etc/init.d/hdp-" + name,
+       content=StaticFile(name + ".init"),
+       mode=0755
+  )
+
+
+def shell_file(name):
+  import params
+
+  File(params.ganglia_shell_cmds_dir + os.sep + name,
+       content=StaticFile(name),
+       mode=0755
+  )
+
+
+def ganglia_TemplateConfig(name, mode=755, tag=None):
+  import params
+
+  TemplateConfig(format("{params.ganglia_shell_cmds_dir}/{name}"),
+                 owner="root",
+                 group="root",
+                 template_tag=tag,
+                 mode=mode
+  )
+
+
+def generate_daemon(ganglia_service,
+                    name=None,
+                    role=None,
+                    owner=None,
+                    group=None):
+  import params
+
+  cmd = ""
+  if ganglia_service == "gmond":
+    if role == "server":
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -m -o {owner} -g {group}"
+    else:
+      cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -c {name} -o {owner} -g {group}"
+  elif ganglia_service == "gmetad":
+    cmd = "{params.ganglia_shell_cmds_dir}/setupGanglia.sh -t -o {owner} -g {group}"
+  else:
+    raise Fail("Unexpected ganglia service")
+  Execute(format(cmd),
+          path=[params.ganglia_shell_cmds_dir, "/usr/sbin",
+                "/sbin:/usr/local/bin", "/bin", "/usr/bin"]
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
new file mode 100644
index 0000000..6ae004b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor.py
@@ -0,0 +1,176 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_monitor_service
+
+
+class GangliaMonitor(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    ganglia_monitor_service.monitor("start")
+
+  def stop(self, env):
+    ganglia_monitor_service.monitor("stop")
+
+
+  def status(self, env):
+    import status_params
+    pid_file_name = 'gmond.pid'
+    pid_file_count = 0
+    pid_dir = status_params.pid_dir
+    # Recursively check all existing gmond pid files
+    for cur_dir, subdirs, files in os.walk(pid_dir):
+      for file_name in files:
+        if file_name == pid_file_name:
+          pid_file = os.path.join(cur_dir, file_name)
+          check_process_status(pid_file)
+          pid_file_count += 1
+    if pid_file_count == 0: # If no pid file is present
+      raise ComponentIsNotRunning()
+
+
+  def config(self, env):
+    import params
+
+    ganglia.groups_and_users()
+
+    Directory(params.ganglia_conf_dir,
+              owner="root",
+              group=params.user_group,
+              recursive=True
+    )
+
+    ganglia.config()
+
+    if params.is_namenode_master:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jtnode_master:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_rmnode_master:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_master:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_nodemanager:
+      generate_daemon("gmond",
+                      name = "HDPNodeManager",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hsnode_master:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_slave:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.is_jn_host:
+      generate_daemon("gmond",
+                      name = "HDPJournalNode",
+                      role = "monitor",
+                      owner = "root",
+                      group = params.user_group)
+
+    Directory(path.join(params.ganglia_dir, "conf.d"),
+              owner="root",
+              group=params.user_group
+    )
+
+    File(path.join(params.ganglia_dir, "conf.d/modgstatus.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "conf.d/multicpu.conf"),
+         owner="root",
+         group=params.user_group
+    )
+    File(path.join(params.ganglia_dir, "gmond.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+if __name__ == "__main__":
+  GangliaMonitor().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
new file mode 100644
index 0000000..d86d894
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_monitor_service.py
@@ -0,0 +1,31 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def monitor(action=None):  # 'start' or 'stop'
+  if action == "start":
+    Execute("chkconfig gmond off",
+            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin',
+    )
+  Execute(
+    format(
+      "service hdp-gmond {action} >> /tmp/gmond.log  2>&1 ; /bin/ps auwx | /bin/grep [g]mond  >> /tmp/gmond.log  2>&1"),
+    path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
new file mode 100644
index 0000000..ab730de
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server.py
@@ -0,0 +1,197 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+import sys
+import os
+from os import path
+from resource_management import *
+from ganglia import generate_daemon
+import ganglia
+import ganglia_server_service
+
+
+class GangliaServer(Script):
+  def install(self, env):
+    import params
+
+    self.install_packages(env)
+    env.set_params(params)
+    self.config(env)
+
+  def start(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("start")
+
+  def stop(self, env):
+    import params
+
+    env.set_params(params)
+    ganglia_server_service.server("stop")
+
+  def status(self, env):
+    import status_params
+    env.set_params(status_params)
+    pid_file = format("{pid_dir}/gmetad.pid")
+    # Check the gmetad pid file
+    check_process_status(pid_file)
+
+  def config(self, env):
+    import params
+
+    ganglia.groups_and_users()
+    ganglia.config()
+
+    if params.has_namenodes:
+      generate_daemon("gmond",
+                      name = "HDPNameNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_jobtracker:
+      generate_daemon("gmond",
+                      name = "HDPJobTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_masters:
+      generate_daemon("gmond",
+                      name = "HDPHBaseMaster",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_resourcemanager:
+      generate_daemon("gmond",
+                      name = "HDPResourceManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_nodemanager:
+      generate_daemon("gmond",
+                      name = "HDPNodeManager",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_historyserver:
+      generate_daemon("gmond",
+                      name = "HDPHistoryServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_slaves:
+      generate_daemon("gmond",
+                      name = "HDPDataNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_tasktracker:
+      generate_daemon("gmond",
+                      name = "HDPTaskTracker",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_hbase_rs:
+      generate_daemon("gmond",
+                      name = "HDPHBaseRegionServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_flume:
+      generate_daemon("gmond",
+                      name = "HDPFlumeServer",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    if params.has_journalnode:
+      generate_daemon("gmond",
+                      name = "HDPJournalNode",
+                      role = "server",
+                      owner = "root",
+                      group = params.user_group)
+
+    generate_daemon("gmetad",
+                    name = "gmetad",
+                    role = "server",
+                    owner = "root",
+                    group = params.user_group)
+
+    change_permission()
+    server_files()
+    File(path.join(params.ganglia_dir, "gmetad.conf"),
+         owner="root",
+         group=params.user_group
+    )
+
+
+def change_permission():
+  import params
+
+  Directory('/var/lib/ganglia/dwoo',
+            mode=0777,
+            owner=params.gmetad_user,
+            recursive=True
+  )
+
+
+def server_files():
+  import params
+
+  rrd_py_path = params.rrd_py_path
+  Directory(rrd_py_path,
+            recursive=True
+  )
+  rrd_py_file_path = path.join(rrd_py_path, "rrd.py")
+  File(rrd_py_file_path,
+       content=StaticFile("rrd.py"),
+       mode=0755
+  )
+  rrd_file_owner = params.gmetad_user
+  if params.rrdcached_default_base_dir != params.rrdcached_base_dir:
+    Directory(params.rrdcached_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              mode=0755,
+              recursive=True
+    )
+    Directory(params.rrdcached_default_base_dir,
+              action = "delete"
+    )
+    Link(params.rrdcached_default_base_dir,
+         to=params.rrdcached_base_dir
+    )
+  elif rrd_file_owner != 'nobody':
+    Directory(params.rrdcached_default_base_dir,
+              owner=rrd_file_owner,
+              group=rrd_file_owner,
+              recursive=True
+    )
+
+
+if __name__ == "__main__":
+  GangliaServer().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
new file mode 100644
index 0000000..b93e3f8
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/ganglia_server_service.py
@@ -0,0 +1,27 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+
+
+def server(action=None):  # 'start' or 'stop'
+  command = "service hdp-gmetad {action} >> /tmp/gmetad.log  2>&1 ; /bin/ps auwx | /bin/grep [g]metad  >> /tmp/gmetad.log  2>&1"
+  Execute(format(command),
+          path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
+  )
+  MonitorWebserver("restart")

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
new file mode 100644
index 0000000..32a7e4b
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/params.py
@@ -0,0 +1,80 @@
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+"""
+
+from resource_management import *
+from resource_management.core.system import System
+
+config = Script.get_config()
+
+user_group = config['configurations']['global']["user_group"]
+ganglia_conf_dir = "/etc/ganglia/hdp"
+ganglia_dir = "/etc/ganglia"
+ganglia_runtime_dir = config['configurations']['global']["ganglia_runtime_dir"]
+ganglia_shell_cmds_dir = "/usr/libexec/hdp/ganglia"
+
+gmetad_user = config['configurations']['global']["gmetad_user"]
+gmond_user = config['configurations']['global']["gmond_user"]
+
+webserver_group = "apache"
+rrdcached_default_base_dir = "/var/lib/ganglia/rrds"
+rrdcached_base_dir = config['configurations']['global']["rrdcached_base_dir"]
+
+ganglia_server_host = config["clusterHostInfo"]["ganglia_server_host"][0]
+
+hostname = config["hostname"]
+namenode_host = default("/clusterHostInfo/namenode_host", [])
+jtnode_host = default("/clusterHostInfo/jtnode_host", [])
+rm_host = default("/clusterHostInfo/rm_host", [])
+hs_host = default("/clusterHostInfo/hs_host", [])
+hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
+# datanodes are marked as slave_hosts
+slave_hosts = default("/clusterHostInfo/slave_hosts", [])
+tt_hosts = default("/clusterHostInfo/mapred_tt_hosts", [])
+nm_hosts = default("/clusterHostInfo/nm_hosts", [])
+hbase_rs_hosts = default("/clusterHostInfo/hbase_rs_hosts", [])
+flume_hosts = default("/clusterHostInfo/flume_hosts", [])
+jn_hosts = default("/clusterHostInfo/journalnode_hosts", [])
+
+is_namenode_master = hostname in namenode_host
+is_jtnode_master = hostname in jtnode_host
+is_rmnode_master = hostname in rm_host
+is_hsnode_master = hostname in hs_host
+is_hbase_master = hostname in hbase_master_hosts
+is_slave = hostname in slave_hosts
+is_tasktracker = hostname in tt_hosts
+is_nodemanager = hostname in nm_hosts
+is_hbase_rs = hostname in hbase_rs_hosts
+is_flume = hostname in flume_hosts
+is_jn_host = hostname in jn_hosts
+
+has_namenodes = not len(namenode_host) == 0
+has_jobtracker = not len(jtnode_host) == 0
+has_resourcemanager = not len(rm_host) == 0
+has_historyserver = not len(hs_host) == 0
+has_hbase_masters = not len(hbase_master_hosts) == 0
+has_slaves = not len(slave_hosts) == 0
+has_tasktracker = not len(tt_hosts) == 0
+has_nodemanager = not len(nm_hosts) == 0
+has_hbase_rs = not len(hbase_rs_hosts) == 0
+has_flume = not len(flume_hosts) == 0
+has_journalnode = not len(jn_hosts) == 0
+
+if System.get_instance().platform == "suse":
+  rrd_py_path = '/srv/www/cgi-bin'
+else:
+  rrd_py_path = '/var/www/cgi-bin'
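
For readers following the host-derived flags above: default() is assumed to look up a '/'-separated path in the command configuration and fall back to the given value when the key is missing, which is what keeps the has_*/is_* booleans well-defined on clusters that lack a component. A minimal sketch under that assumption (this standalone default() takes the config explicitly and is only an illustration, not the real helper's signature):

def default(path, fallback, config):
    # Walk a '/'-separated path through nested dicts; return the
    # fallback as soon as any key along the path is missing.
    node = config
    for key in path.strip("/").split("/"):
        if not isinstance(node, dict) or key not in node:
            return fallback
        node = node[key]
    return node

sample = {"clusterHostInfo": {"namenode_host": ["nn1.example.com"]}}
namenode_host = default("/clusterHostInfo/namenode_host", [], sample)
flume_hosts = default("/clusterHostInfo/flume_hosts", [], sample)
print("nn1.example.com" in namenode_host)  # True  -> is_namenode_master
print(len(flume_hosts) != 0)               # False -> has_flume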

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
new file mode 100644
index 0000000..3ccad2f
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/scripts/status_params.py
@@ -0,0 +1,25 @@
+#!/usr/bin/env python
+"""
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+
+"""
+
+from resource_management import *
+
+config = Script.get_config()
+
+pid_dir = config['configurations']['global']['ganglia_runtime_dir']

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
new file mode 100644
index 0000000..f3bb355
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaClusters.conf.j2
@@ -0,0 +1,35 @@
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+#########################################################
+### ClusterName           GmondMasterHost   GmondPort ###
+#########################################################
+
+    HDPJournalNode        {{ganglia_server_host}}   8654
+    HDPFlumeServer        {{ganglia_server_host}}   8655
+    HDPHBaseRegionServer  {{ganglia_server_host}}   8656
+    HDPNodeManager        {{ganglia_server_host}}   8657
+    HDPTaskTracker        {{ganglia_server_host}}   8658
+    HDPDataNode           {{ganglia_server_host}}   8659
+    HDPSlaves             {{ganglia_server_host}}   8660
+    HDPNameNode           {{ganglia_server_host}}   8661
+    HDPJobTracker         {{ganglia_server_host}}   8662
+    HDPHBaseMaster        {{ganglia_server_host}}   8663
+    HDPResourceManager    {{ganglia_server_host}}   8664
+    HDPHistoryServer      {{ganglia_server_host}}   8666
+
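
The table above is what the gangliaLib.sh.j2 helper later in this diff filters with awk (by cluster name, skipping comments). Purely as an illustrative sketch, assuming the whitespace-separated ClusterName / GmondMasterHost / GmondPort layout shown, the same lookup in Python would be:

def get_ganglia_cluster_info(conf_text, cluster_name=None):
    # Keep non-comment, non-empty rows; optionally match one cluster name,
    # mirroring getGangliaClusterInfo() in gangliaLib.sh.j2.
    rows = []
    for line in conf_text.splitlines():
        fields = line.split()
        if not fields or fields[0].startswith("#"):
            continue
        if cluster_name is None or fields[0] == cluster_name:
            rows.append(tuple(fields))  # (cluster, gmond_master_host, port)
    return rows

conf = "HDPNameNode gmond.example.com 8661\nHDPSlaves gmond.example.com 8660\n"
print(get_ganglia_cluster_info(conf, "HDPSlaves"))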

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
new file mode 100644
index 0000000..1ead550
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaEnv.sh.j2
@@ -0,0 +1,24 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+# Unix users and groups for the binaries we start up.
+GMETAD_USER={{gmetad_user}};
+GMOND_USER={{gmond_user}};
+WEBSERVER_GROUP={{webserver_group}};

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2 b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
new file mode 100644
index 0000000..4b5bdd1
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/GANGLIA/package/templates/gangliaLib.sh.j2
@@ -0,0 +1,62 @@
+#!/bin/sh
+
+#/*
+# * Licensed to the Apache Software Foundation (ASF) under one
+# * or more contributor license agreements.  See the NOTICE file
+# * distributed with this work for additional information
+# * regarding copyright ownership.  The ASF licenses this file
+# * to you under the Apache License, Version 2.0 (the
+# * "License"); you may not use this file except in compliance
+# * with the License.  You may obtain a copy of the License at
+# *
+# *     http://www.apache.org/licenses/LICENSE-2.0
+# *
+# * Unless required by applicable law or agreed to in writing, software
+# * distributed under the License is distributed on an "AS IS" BASIS,
+# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# * See the License for the specific language governing permissions and
+# * limitations under the License.
+# */
+
+cd `dirname ${0}`;
+
+GANGLIA_CONF_DIR={{ganglia_conf_dir}};
+GANGLIA_RUNTIME_DIR={{ganglia_runtime_dir}};
+RRDCACHED_BASE_DIR={{rrdcached_base_dir}};
+
+# This file contains all the info about each Ganglia Cluster in our Grid.
+GANGLIA_CLUSTERS_CONF_FILE=./gangliaClusters.conf;
+
+function createDirectory()
+{
+    directoryPath=${1};
+
+    if [ "x" != "x${directoryPath}" ]
+    then
+        mkdir -p ${directoryPath};
+    fi
+}
+
+function getGangliaClusterInfo()
+{
+    clusterName=${1};
+
+    if [ "x" != "x${clusterName}" ]
+    then
+        # Fetch the particular entry for ${clusterName} from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk -v clusterName=${clusterName} '($1 !~ /^#/) && ($1 == clusterName)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    else
+        # Spit out all the non-comment, non-empty lines from ${GANGLIA_CLUSTERS_CONF_FILE}.
+        awk '($1 !~ /^#/) && (NF)' ${GANGLIA_CLUSTERS_CONF_FILE};
+    fi
+}
+
+function getConfiguredGangliaClusterNames()
+{
+  # Find all the subdirectories in ${GANGLIA_CONF_DIR} and extract only 
+  # the subdirectory name from each.
+  if [ -e ${GANGLIA_CONF_DIR} ]
+  then  
+    find ${GANGLIA_CONF_DIR} -maxdepth 1 -mindepth 1 -type d | xargs -n1 basename;
+  fi
+}

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
new file mode 100644
index 0000000..b2c57bd
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/global.xml
@@ -0,0 +1,160 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>hbasemaster_host</name>
+    <value></value>
+    <description>HBase Master Host.</description>
+  </property>
+  <property>
+    <name>regionserver_hosts</name>
+    <value></value>
+    <description>Region Server Hosts</description>
+  </property>
+  <property>
+    <name>hbase_log_dir</name>
+    <value>/var/log/hbase</value>
+    <description>Log Directories for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_pid_dir</name>
+    <value>/var/run/hbase</value>
+    <description>PID Directory for HBase.</description>
+  </property>
+  <property>
+    <name>hbase_regionserver_heapsize</name>
+    <value>1024</value>
+    <description>HBase RegionServer Heap Size.</description>
+  </property>
+  <property>
+    <name>hbase_master_heapsize</name>
+    <value>1024</value>
+    <description>HBase Master Heap Size</description>
+  </property>
+  <property>
+    <name>hstore_compactionthreshold</name>
+    <value>3</value>
+    <description>HBase HStore compaction threshold.</description>
+  </property>
+  <property>
+    <name>hfile_blockcache_size</name>
+    <value>0.40</value>
+    <description>HFile block cache size.</description>
+  </property>
+  <property>
+    <name>hstorefile_maxsize</name>
+    <value>10737418240</value>
+    <description>Maximum HStoreFile Size</description>
+  </property>
+    <property>
+    <name>regionserver_handlers</name>
+    <value>60</value>
+    <description>HBase RegionServer Handler</description>
+  </property>
+    <property>
+    <name>hregion_majorcompaction</name>
+    <value>604800000</value>
+    <description>The time between major compactions of all HStoreFiles in a region. Set to 0 to disable automated major compactions.</description>
+  </property>
+    <property>
+    <name>hregion_blockmultiplier</name>
+    <value>2</value>
+    <description>HBase Region Block Multiplier</description>
+  </property>
+    <property>
+    <name>hregion_memstoreflushsize</name>
+    <value></value>
+    <description>HBase Region MemStore Flush Size.</description>
+  </property>
+    <property>
+    <name>client_scannercaching</name>
+    <value>100</value>
+    <description>HBase Client Scanner Caching</description>
+  </property>
+    <property>
+    <name>zookeeper_sessiontimeout</name>
+    <value>30000</value>
+    <description>ZooKeeper Session Timeout</description>
+  </property>
+    <property>
+    <name>hfile_max_keyvalue_size</name>
+    <value>10485760</value>
+    <description>HBase Client Maximum key-value Size</description>
+  </property>
+  <property>
+    <name>hbase_hdfs_root_dir</name>
+    <value>/apps/hbase/data</value>
+    <description>HBase Relative Path to HDFS.</description>
+  </property>
+   <property>
+    <name>hbase_conf_dir</name>
+    <value>/etc/hbase</value>
+    <description>Config Directory for HBase.</description>
+  </property>
+   <property>
+    <name>hdfs_enable_shortcircuit_read</name>
+    <value>true</value>
+    <description>HDFS Short Circuit Read</description>
+  </property>
+   <property>
+    <name>hdfs_support_append</name>
+    <value>true</value>
+    <description>HDFS append support</description>
+  </property>
+   <property>
+    <name>hstore_blockingstorefiles</name>
+    <value>10</value>
+    <description>HStore blocking storefiles.</description>
+  </property>
+   <property>
+    <name>regionserver_memstore_lab</name>
+    <value>true</value>
+    <description>Enable Region Server MemStore-Local Allocation Buffer (MSLAB).</description>
+  </property>
+   <property>
+    <name>regionserver_memstore_lowerlimit</name>
+    <value>0.38</value>
+    <description>Region Server memstore lower limit.</description>
+  </property>
+   <property>
+    <name>regionserver_memstore_upperlimit</name>
+    <value>0.4</value>
+    <description>Region Server memstore upper limit.</description>
+  </property>
+   <property>
+    <name>hbase_conf_dir</name>
+    <value>/etc/hbase</value>
+    <description>HBase conf dir.</description>
+  </property>
+   <property>
+    <name>hbase_user</name>
+    <value>hbase</value>
+    <description>HBase User Name.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
new file mode 100644
index 0000000..e45f23c
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-policy.xml
@@ -0,0 +1,53 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+
+<configuration>
+  <property>
+    <name>security.client.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HRegionInterface protocol implementations (i.e.
+    clients talking to HRegionServers).
+    The ACL is a comma-separated list of user and group names. The user and
+    group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.admin.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterInterface protocol implementation (i.e.
+    clients talking to HMaster for admin operations).
+    The ACL is a comma-separated list of user and group names. The user and
+    group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+
+  <property>
+    <name>security.masterregion.protocol.acl</name>
+    <value>*</value>
+    <description>ACL for HMasterRegionInterface protocol implementations
+    (for HRegionServers communicating with HMaster).
+    The ACL is a comma-separated list of user and group names. The user and
+    group lists are separated by a blank, e.g. "alice,bob users,wheel".
+    A special value of "*" means all users are allowed.</description>
+  </property>
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
new file mode 100644
index 0000000..bf4af7d
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/configuration/hbase-site.xml
@@ -0,0 +1,356 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+-->
+<configuration>
+  <property>
+    <name>hbase.rootdir</name>
+    <value>hdfs://localhost:8020/apps/hbase/data</value>
+    <description>The directory shared by region servers and into
+    which HBase persists.  The URL should be 'fully-qualified'
+    to include the filesystem scheme.  For example, to specify the
+    HDFS directory '/hbase' where the HDFS instance's namenode is
+    running at namenode.example.org on port 9000, set this value to:
+    hdfs://namenode.example.org:9000/hbase.  By default HBase writes
+    into /tmp.  Change this configuration, or else all data will be lost
+    on machine restart.
+    </description>
+  </property>
+  <property>
+    <name>hbase.cluster.distributed</name>
+    <value>true</value>
+    <description>The mode the cluster will be in. Possible values are
+      false for standalone mode and true for distributed mode.  If
+      false, startup will run all HBase and ZooKeeper daemons together
+      in the one JVM.
+    </description>
+  </property>
+  <property>
+    <name>hbase.tmp.dir</name>
+    <value>/hadoop/hbase</value>
+    <description>Temporary directory on the local filesystem.
+    Change this setting to point to a location more permanent
+    than '/tmp' (The '/tmp' directory is often cleared on
+    machine restart).
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.bindAddress</name>
+    <value></value>
+    <description>The bind address for the HBase Master web UI
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.info.port</name>
+    <value></value>
+    <description>The port for the HBase Master web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.info.port</name>
+    <value></value>
+    <description>The port for the HBase RegionServer web UI.</description>
+  </property>
+  <property>
+    <name>hbase.regionserver.global.memstore.upperLimit</name>
+    <value>0.4</value>
+    <description>Maximum size of all memstores in a region server before new
+      updates are blocked and flushes are forced. Defaults to 40% of heap
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.handler.count</name>
+    <value>60</value>
+    <description>Count of RPC Listener instances spun up on RegionServers.
+    Same property is used by the Master for count of master handlers.
+    Default is 10.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.majorcompaction</name>
+    <value>86400000</value>
+    <description>The time (in milliseconds) between 'major' compactions of all
+    HStoreFiles in a region.  Default: 1 day.
+    Set to 0 to disable automated major compactions.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.regionserver.global.memstore.lowerLimit</name>
+    <value>0.38</value>
+    <description>When memstores are being forced to flush to make room in
+      memory, keep flushing until we hit this mark. Defaults to 35% of heap.
+      This value equal to hbase.regionserver.global.memstore.upperLimit causes
+      the minimum possible flushing to occur when updates are blocked due to
+      memstore limiting.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.block.multiplier</name>
+    <value>2</value>
+    <description>Block updates if the memstore reaches hbase.hregion.memstore.block.multiplier
+    times hbase.hregion.flush.size bytes.  Useful for preventing
+    runaway memstore growth during spikes in update traffic.  Without an
+    upper bound, the memstore fills such that when it flushes, the
+    resultant flush files take a long time to compact or split, or,
+    worse, we OOME.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.flush.size</name>
+    <value>134217728</value>
+    <description>
+    Memstore will be flushed to disk if size of the memstore
+    exceeds this number of bytes.  Value is checked by a thread that runs
+    every hbase.server.thread.wakefrequency.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.memstore.mslab.enabled</name>
+    <value>true</value>
+    <description>
+      Enables the MemStore-Local Allocation Buffer,
+      a feature which works to prevent heap fragmentation under
+      heavy write loads. This can reduce the frequency of stop-the-world
+      GC pauses on large heaps.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hregion.max.filesize</name>
+    <value>10737418240</value>
+    <description>
+    Maximum HStoreFile size. If any one of a column families' HStoreFiles has
+    grown to exceed this value, the hosting HRegion is split in two.
+    Default: 1G.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.scanner.caching</name>
+    <value>100</value>
+    <description>Number of rows that will be fetched when calling next
+    on a scanner if it is not served from (local, client) memory. Higher
+    caching values will enable faster scanners but will eat up more memory
+    and some calls of next may take longer and longer times when the cache is empty.
+    Do not set this value such that the time between invocations is greater
+    than the scanner timeout; i.e. hbase.regionserver.lease.period
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.session.timeout</name>
+    <value>30000</value>
+    <description>ZooKeeper session timeout.
+      HBase passes this to the zk quorum as suggested maximum time for a
+      session (This setting becomes zookeeper's 'maxSessionTimeout').  See
+      http://hadoop.apache.org/zookeeper/docs/current/zookeeperProgrammers.html#ch_zkSessions
+      "The client sends a requested timeout, the server responds with the
+      timeout that it can give the client. " In milliseconds.
+    </description>
+  </property>
+  <property>
+    <name>hbase.client.keyvalue.maxsize</name>
+    <value>10485760</value>
+    <description>Specifies the combined maximum allowed size of a KeyValue
+    instance. This is to set an upper boundary for a single entry saved in a
+    storage file. Since such entries cannot be split, this helps avoid a region
+    becoming unsplittable because the data is too large. It seems wise
+    to set this to a fraction of the maximum region size. Setting it to zero
+    or less disables the check.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.compactionThreshold</name>
+    <value>3</value>
+    <description>
+    If more than this number of HStoreFiles in any one HStore
+    (one HStoreFile is written per flush of memstore) then a compaction
+    is run to rewrite all HStoreFiles files as one.  Larger numbers
+    put off compaction but when it runs, it takes longer to complete.
+    </description>
+  </property>
+  <property>
+    <name>hbase.hstore.flush.retries.number</name>
+    <value>120</value>
+    <description>
+    The number of times the region flush operation will be retried.
+    </description>
+  </property>
+  
+  <property>
+    <name>hbase.hstore.blockingStoreFiles</name>
+    <value>10</value>
+    <description>
+    If more than this number of StoreFiles in any one Store
+    (one StoreFile is written per flush of MemStore) then updates are
+    blocked for this HRegion until a compaction is completed, or
+    until hbase.hstore.blockingWaitTime has been exceeded.
+    </description>
+  </property>
+  <property>
+    <name>hfile.block.cache.size</name>
+    <value>0.40</value>
+    <description>
+        Percentage of maximum heap (-Xmx setting) to allocate to block cache
+        used by HFile/StoreFile. Default of 0.25 means allocate 25%.
+        Set to 0 to disable but it's not recommended.
+    </description>
+  </property>
+
+  <!-- The following properties configure authentication information for
+       HBase processes when using Kerberos security.  There are no default
+       values, included here for documentation purposes -->
+  <property>
+    <name>hbase.master.keytab.file</name>
+    <value></value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HMaster server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.master.kerberos.principal</name>
+    <value></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HMaster process.  The principal name should
+    be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the hostname
+    portion, it will be replaced with the actual hostname of the running
+    instance.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.keytab.file</name>
+    <value></value>
+    <description>Full path to the kerberos keytab file to use for logging in
+    the configured HRegionServer server principal.
+    </description>
+  </property>
+  <property>
+    <name>hbase.regionserver.kerberos.principal</name>
+    <value></value>
+    <description>Ex. "hbase/_HOST@EXAMPLE.COM".  The kerberos principal name
+    that should be used to run the HRegionServer process.  The principal name
+    should be in the form: user/hostname@DOMAIN.  If "_HOST" is used as the
+    hostname portion, it will be replaced with the actual hostname of the
+    running instance.  An entry for this principal must exist in the file
+    specified in hbase.regionserver.keytab.file
+    </description>
+  </property>
+
+  <!-- Additional configuration specific to HBase security -->
+  <property>
+    <name>hbase.superuser</name>
+    <value>hbase</value>
+    <description>List of users or groups (comma-separated), who are allowed
+    full privileges, regardless of stored ACLs, across the cluster.
+    Only used when HBase security is enabled.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.security.authentication</name>
+    <value>simple</value>
+  </property>
+
+  <property>
+    <name>hbase.security.authorization</name>
+    <value>false</value>
+    <description>Enables HBase authorization. Set the value of this property to false to disable HBase authorization.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.region.classes</name>
+    <value></value>
+    <description>A comma-separated list of Coprocessors that are loaded by
+    default on all tables. For any override coprocessor method, these classes
+    will be called in order. After implementing your own Coprocessor, just put
+    it in HBase's classpath and add the fully qualified class name here.
+    A coprocessor can also be loaded on demand by setting HTableDescriptor.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.coprocessor.master.classes</name>
+    <value></value>
+    <description>A comma-separated list of
+      org.apache.hadoop.hbase.coprocessor.MasterObserver coprocessors that are
+      loaded by default on the active HMaster process. For any implemented
+      coprocessor methods, the listed classes will be called in order. After
+      implementing your own MasterObserver, just put it in HBase's classpath
+      and add the fully qualified class name here.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.zookeeper.property.clientPort</name>
+    <value>2181</value>
+    <description>Property from ZooKeeper's config zoo.cfg.
+    The port at which the clients will connect.
+    </description>
+  </property>
+
+  <!--
+  The following three properties are used together to create the list of
+  host:peer_port:leader_port quorum servers for ZooKeeper.
+  -->
+  <property>
+    <name>hbase.zookeeper.quorum</name>
+    <value>localhost</value>
+    <description>Comma separated list of servers in the ZooKeeper Quorum.
+    For example, "host1.mydomain.com,host2.mydomain.com,host3.mydomain.com".
+    By default this is set to localhost for local and pseudo-distributed modes
+    of operation. For a fully-distributed setup, this should be set to a full
+    list of ZooKeeper quorum servers. If HBASE_MANAGES_ZK is set in hbase-env.sh
+    this is the list of servers which we will start/stop ZooKeeper on.
+    </description>
+  </property>
+  <!-- End of properties used to generate ZooKeeper host:port quorum list. -->
+
+  <property>
+    <name>hbase.zookeeper.useMulti</name>
+    <value>true</value>
+    <description>Instructs HBase to make use of ZooKeeper's multi-update functionality.
+    This allows certain ZooKeeper operations to complete more quickly and prevents some issues
+    with rare Replication failure scenarios (see the release note of HBASE-2611 for an example).
+    IMPORTANT: only set this to true if all ZooKeeper servers in the cluster are on version 3.4+
+    and will not be downgraded.  ZooKeeper versions before 3.4 do not support multi-update and will
+    not fail gracefully if multi-update is invoked (see ZOOKEEPER-1495).
+    </description>
+  </property>
+  <property>
+    <name>zookeeper.znode.parent</name>
+    <value>/hbase-unsecure</value>
+    <description>Root ZNode for HBase in ZooKeeper. All of HBase's ZooKeeper
+      files that are configured with a relative path will go under this node.
+      By default, all of HBase's ZooKeeper file path are configured with a
+      relative path, so they will all go under this directory unless changed.
+    </description>
+  </property>
+
+  <property>
+    <name>hbase.defaults.for.version.skip</name>
+    <value>true</value>
+    <description>Disables version verification.</description>
+  </property>
+
+  <property>
+    <name>dfs.domain.socket.path</name>
+    <value>/var/lib/hadoop-hdfs/dn_socket</value>
+    <description>Path to domain socket.</description>
+  </property>
+
+</configuration>

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
new file mode 100644
index 0000000..15d1045
--- /dev/null
+++ b/ambari-server/src/main/resources/stacks/HDP/2.1.1/services/HBASE/metainfo.xml
@@ -0,0 +1,93 @@
+<?xml version="1.0"?>
+<!--
+   Licensed to the Apache Software Foundation (ASF) under one or more
+   contributor license agreements.  See the NOTICE file distributed with
+   this work for additional information regarding copyright ownership.
+   The ASF licenses this file to You under the Apache License, Version 2.0
+   (the "License"); you may not use this file except in compliance with
+   the License.  You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+-->
+<metainfo>
+  <schemaVersion>2.0</schemaVersion>
+  <services>
+    <service>
+      <name>HBASE</name>
+      <comment>Non-relational distributed database and centralized service for configuration management &amp;
+        synchronization
+      </comment>
+      <version>0.96.0.2.1.1</version>
+      <components>
+        <component>
+          <name>HBASE_MASTER</name>
+          <category>MASTER</category>
+          <commandScript>
+            <script>scripts/hbase_master.py</script>
+            <scriptType>PYTHON</scriptType>
+            <timeout>600</timeout>
+          </commandScript>
+          <customCommands>
+            <customCommand>
+              <name>DECOMMISSION</name>
+              <commandScript>
+                <script>scripts/hbase_master.py</script>
+                <scriptType>PYTHON</scriptType>
+                <timeout>600</timeout>
+              </commandScript>
+            </customCommand>
+          </customCommands>
+        </component>
+
+        <component>
+          <name>HBASE_REGIONSERVER</name>
+          <category>SLAVE</category>
+          <commandScript>
+            <script>scripts/hbase_regionserver.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+
+        <component>
+          <name>HBASE_CLIENT</name>
+          <category>CLIENT</category>
+          <commandScript>
+            <script>scripts/hbase_client.py</script>
+            <scriptType>PYTHON</scriptType>
+          </commandScript>
+        </component>
+      </components>
+
+      <osSpecifics>
+        <osSpecific>
+          <osType>centos6</osType>
+          <packages>
+            <package>
+              <type>rpm</type>
+              <name>hbase</name>
+            </package>
+          </packages>
+        </osSpecific>
+      </osSpecifics>
+
+      <commandScript>
+        <script>scripts/service_check.py</script>
+        <scriptType>PYTHON</scriptType>
+        <timeout>300</timeout>
+      </commandScript>
+
+      <configuration-dependencies>
+        <config-type>global</config-type>
+        <config-type>hbase-policy</config-type>
+        <config-type>hbase-site</config-type>
+      </configuration-dependencies>
+
+    </service>
+  </services>
+</metainfo>


[23/37] AMBARI-4341. Rename 2.0.8 to 2.1.1 in the stack definition. (mahadev)

Posted by ma...@apache.org.
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkForFormat.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkForFormat.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkForFormat.sh
deleted file mode 100644
index d14091a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkForFormat.sh
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-export hdfs_user=$1
-shift
-export conf_dir=$1
-shift
-export mark_dir=$1
-shift
-export name_dirs=$*
-
-export EXIT_CODE=0
-export command="namenode -format"
-export list_of_non_empty_dirs=""
-
-mark_file=/var/run/hadoop/hdfs/namenode-formatted
-if [[ -f ${mark_file} ]] ; then
-  rm -f ${mark_file}
-  mkdir -p ${mark_dir}
-fi
-
-if [[ ! -d $mark_dir ]] ; then
-  for dir in `echo $name_dirs | tr ',' ' '` ; do
-    echo "NameNode Dirname = $dir"
-    cmd="ls $dir | wc -l  | grep -q ^0$"
-    eval $cmd
-    if [[ $? -ne 0 ]] ; then
-      (( EXIT_CODE = $EXIT_CODE + 1 ))
-      list_of_non_empty_dirs="$list_of_non_empty_dirs $dir"
-    fi
-  done
-
-  if [[ $EXIT_CODE == 0 ]] ; then
-    su - ${hdfs_user} -c "yes Y | hadoop --config ${conf_dir} ${command}"
-  else
-    echo "ERROR: Namenode directory(s) is non empty. Will not format the namenode. List of non-empty namenode dirs ${list_of_non_empty_dirs}"
-  fi
-else
-  echo "${mark_dir} exists. Namenode DFS already formatted"
-fi
-
-exit $EXIT_CODE
-

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkWebUI.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkWebUI.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkWebUI.py
deleted file mode 100644
index f8e9c1a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/files/checkWebUI.py
+++ /dev/null
@@ -1,53 +0,0 @@
-#!/usr/bin/env python
-
-'''
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-'''
-
-import optparse
-import httplib
-
-#
-# Main.
-#
-def main():
-  parser = optparse.OptionParser(usage="usage: %prog [options] component ")
-  parser.add_option("-m", "--hosts", dest="hosts", help="Comma separated hosts list for WEB UI to check it availability")
-  parser.add_option("-p", "--port", dest="port", help="Port of WEB UI to check it availability")
-
-  (options, args) = parser.parse_args()
-  
-  hosts = options.hosts.split(',')
-  port = options.port
-
-  for host in hosts:
-    try:
-      conn = httplib.HTTPConnection(host, port)
-      # This can be modified to get a partial url part to be sent with request
-      conn.request("GET", "/")
-      httpCode = conn.getresponse().status
-      conn.close()
-    except Exception:
-      httpCode = 404
-
-    if httpCode != 200:
-      print "Cannot access WEB UI on: http://" + host + ":" + port
-      exit(1)
-      
-
-if __name__ == "__main__":
-  main()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/datanode.py
deleted file mode 100644
index eaa27cf..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/datanode.py
+++ /dev/null
@@ -1,57 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_datanode import datanode
-
-
-class DataNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    datanode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    datanode(action="stop")
-
-  def config(self, env):
-    import params
-
-    datanode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.datanode_pid_file)
-
-
-if __name__ == "__main__":
-  DataNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_client.py
deleted file mode 100644
index ec24c7d..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_client.py
+++ /dev/null
@@ -1,52 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class HdfsClient(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-  def config(self, env):
-    import params
-
-    pass
-
-
-if __name__ == "__main__":
-  HdfsClient().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_datanode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_datanode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_datanode.py
deleted file mode 100644
index e0b6c39..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_datanode.py
+++ /dev/null
@@ -1,56 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-def datanode(action=None):
-  import params
-
-  if action == "configure":
-    Directory(params.dfs_domain_socket_dir,
-              recursive=True,
-              mode=0750,
-              owner=params.hdfs_user,
-              group=params.user_group)
-    Directory(params.dfs_data_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-
-  if action == "start":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )
-  if action == "stop":
-    service(
-      action=action, name="datanode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_datanode_keytab_file,
-      principal=params.dfs_datanode_kerberos_principal
-    )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
deleted file mode 100644
index 8b29cc3..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_namenode.py
+++ /dev/null
@@ -1,212 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-import urlparse
-
-
-def namenode(action=None, format=True):
-  import params
-  #we need this directory to be present before any action(HA manual steps for
-  #additional namenode)
-  if action == "configure":
-    create_name_dirs(params.dfs_name_dir)
-
-  if action == "start":
-    if format:
-      format_namenode()
-      pass
-
-    File(params.exclude_file_path,
-         content=Template("exclude_hosts_list.j2"),
-         owner=params.hdfs_user,
-         group=params.user_group
-    )
-
-    service(
-      action="start", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      create_pid_dir=True,
-      create_log_dir=True,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-    # TODO: extract creating of dirs to different services
-    create_app_directories()
-    create_user_directories()
-
-  if action == "stop":
-    service(
-      action="stop", name="namenode", user=params.hdfs_user,
-      keytab=params.dfs_namenode_keytab_file,
-      principal=params.dfs_namenode_kerberos_principal
-    )
-
-  if action == "decommission":
-    decommission()
-
-def create_name_dirs(directories):
-  import params
-
-  dirs = directories.split(",")
-  Directory(dirs,
-            mode=0755,
-            owner=params.hdfs_user,
-            group=params.user_group,
-            recursive=True
-  )
-
-
-def create_app_directories():
-  import params
-
-  hdfs_directory(name="/tmp",
-                 owner=params.hdfs_user,
-                 mode="777"
-  )
-  #mapred directories
-  if params.has_histroryserver:
-    hdfs_directory(name="/mapred",
-                   owner=params.mapred_user
-    )
-    hdfs_directory(name="/mapred/system",
-                   owner=params.hdfs_user
-    )
-    #hbase directories
-  if len(params.hbase_master_hosts) != 0:
-    hdfs_directory(name=params.hbase_hdfs_root_dir,
-                   owner=params.hbase_user
-    )
-    hdfs_directory(name=params.hbase_staging_dir,
-                   owner=params.hbase_user,
-                   mode="711"
-    )
-    #hive directories
-  if len(params.hive_server_host) != 0:
-    hdfs_directory(name=params.hive_apps_whs_dir,
-                   owner=params.hive_user,
-                   mode="777"
-    )
-  if len(params.hcat_server_hosts) != 0:
-    hdfs_directory(name=params.webhcat_apps_dir,
-                   owner=params.webhcat_user,
-                   mode="755"
-    )
-  if len(params.hs_host) != 0:
-    if params.yarn_log_aggregation_enabled:
-      hdfs_directory(name=params.yarn_nm_app_log_dir,
-                     owner=params.yarn_user,
-                     group=params.user_group,
-                     mode="777",
-                     recursive_chmod=True
-      )
-    hdfs_directory(name=params.mapreduce_jobhistory_intermediate_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="777"
-    )
-
-    hdfs_directory(name=params.mapreduce_jobhistory_done_dir,
-                   owner=params.mapred_user,
-                   group=params.user_group,
-                   mode="1777"
-    )
-
-  if params.has_falcon_host:
-    if params.falcon_store_uri[0:4] == "hdfs":
-      hdfs_directory(name=params.store_uri,
-                     owner=params.falcon_user,
-                     mode="755"
-      )
-
-def create_user_directories():
-  import params
-
-  hdfs_directory(name=params.smoke_hdfs_user_dir,
-                 owner=params.smoke_user,
-                 mode=params.smoke_hdfs_user_mode
-  )
-
-  if params.has_hive_server_host:
-    hdfs_directory(name=params.hive_hdfs_user_dir,
-                   owner=params.hive_user,
-                   mode=params.hive_hdfs_user_mode
-    )
-
-  if params.has_hcat_server_host:
-    if params.hcat_hdfs_user_dir != params.webhcat_hdfs_user_dir:
-      hdfs_directory(name=params.hcat_hdfs_user_dir,
-                     owner=params.hcat_user,
-                     mode=params.hcat_hdfs_user_mode
-      )
-    hdfs_directory(name=params.webhcat_hdfs_user_dir,
-                   owner=params.webhcat_user,
-                   mode=params.webhcat_hdfs_user_mode
-    )
-
-  if params.has_oozie_server:
-    hdfs_directory(name=params.oozie_hdfs_user_dir,
-                   owner=params.oozie_user,
-                   mode=params.oozie_hdfs_user_mode
-    )
-
-
-def format_namenode(force=None):
-  import params
-
-  mark_dir = params.namenode_formatted_mark_dir
-  dfs_name_dir = params.dfs_name_dir
-  hdfs_user = params.hdfs_user
-  hadoop_conf_dir = params.hadoop_conf_dir
-
-  if not params.dfs_ha_enabled:
-    if force:
-      ExecuteHadoop('namenode -format',
-                    kinit_override=True)
-    else:
-      File('/tmp/checkForFormat.sh',
-           content=StaticFile("checkForFormat.sh"),
-           mode=0755)
-      Execute(format(
-        "sh /tmp/checkForFormat.sh {hdfs_user} {hadoop_conf_dir} {mark_dir} "
-        "{dfs_name_dir}"),
-              not_if=format("test -d {mark_dir}"),
-              path="/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin")
-    Execute(format("mkdir -p {mark_dir}"))
-
-
-def decommission():
-  import params
-
-  hdfs_user = params.hdfs_user
-  conf_dir = params.hadoop_conf_dir
-  user_group = params.user_group
-
-  File(params.exclude_file_path,
-       content=Template("exclude_hosts_list.j2"),
-       owner=hdfs_user,
-       group=user_group
-  )
-
-  ExecuteHadoop('dfsadmin -refreshNodes',
-                user=hdfs_user,
-                conf_dir=conf_dir,
-                kinit_override=True)

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_snamenode.py
deleted file mode 100644
index a943455..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/hdfs_snamenode.py
+++ /dev/null
@@ -1,53 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-from utils import hdfs_directory
-
-
-def snamenode(action=None, format=False):
-  import params
-
-  if action == "configure":
-    Directory(params.fs_checkpoint_dir,
-              recursive=True,
-              mode=0755,
-              owner=params.hdfs_user,
-              group=params.user_group)
-  elif action == "start":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )
-  elif action == "stop":
-    service(
-      action=action,
-      name="secondarynamenode",
-      user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_secondary_namenode_keytab_file,
-      principal=params.dfs_secondary_namenode_kerberos_principal
-    )
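
The start and stop branches in the removed snamenode() above are identical apart from the action string; a minimal refactor sketch (assuming the same resource_management imports and the service() helper from utils.py as in the original file) collapses them into one call:

def snamenode(action=None, format=False):
  import params

  if action == "configure":
    Directory(params.fs_checkpoint_dir,
              recursive=True,
              mode=0755,
              owner=params.hdfs_user,
              group=params.user_group)
  elif action in ("start", "stop"):
    # One call covers both branches; only the action differs.
    service(
      action=action,
      name="secondarynamenode",
      user=params.hdfs_user,
      create_pid_dir=True,
      create_log_dir=True,
      keytab=params.dfs_secondary_namenode_keytab_file,
      principal=params.dfs_secondary_namenode_kerberos_principal
    )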

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/journalnode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/journalnode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/journalnode.py
deleted file mode 100644
index fd355cc..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/journalnode.py
+++ /dev/null
@@ -1,74 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class JournalNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    service(
-      action="start", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="journalnode", user=params.hdfs_user,
-      create_pid_dir=True,
-      create_log_dir=True,
-      keytab=params.dfs_journalnode_keytab_file,
-      principal=params.dfs_journalnode_kerberos_principal
-    )
-
-  def config(self, env):
-    import params
-
-    Directory(params.jn_edits_dir,
-              recursive=True,
-              owner=params.hdfs_user,
-              group=params.user_group
-    )
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.journalnode_pid_file)
-
-
-if __name__ == "__main__":
-  JournalNode().execute()
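
Each of these removed components is a resource_management Script subclass whose execute() entry point dispatches the command sent by the Ambari agent (INSTALL, START, STOP, STATUS, ...) to the matching method. A simplified, standalone illustration of that dispatch pattern (not the real Script implementation):

class MiniScript(object):
  def execute(self, command, env=None):
    # Map the agent's command name onto a lower-cased method of the subclass.
    method = getattr(self, command.lower(), None)
    if method is None:
      raise NotImplementedError("unsupported command: " + command)
    return method(env)

class EchoJournalNode(MiniScript):
  def start(self, env):
    print("would run hadoop-daemon.sh start journalnode")
  def stop(self, env):
    print("would run hadoop-daemon.sh stop journalnode")
  def status(self, env):
    print("would check the journalnode pid file")

if __name__ == "__main__":
  EchoJournalNode().execute("START")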

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
deleted file mode 100644
index deb01d5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/namenode.py
+++ /dev/null
@@ -1,68 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_namenode import namenode
-
-
-class NameNode(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-    #TODO remove when config action will be implemented
-    self.config(env)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    namenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="configure")
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-    check_process_status(status_params.namenode_pid_file)
-    pass
-
-  def decommission(self, env):
-    import params
-
-    env.set_params(params)
-    namenode(action="decommission")
-    pass
-
-if __name__ == "__main__":
-  NameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
deleted file mode 100644
index 685e25f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/params.py
+++ /dev/null
@@ -1,188 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import status_params
-import os
-
-config = Script.get_config()
-
-#security params
-security_enabled = config['configurations']['global']['security_enabled']
-dfs_journalnode_keytab_file = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_web_authentication_kerberos_keytab = config['configurations']['hdfs-site']['dfs.journalnode.keytab.file']
-dfs_secondary_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.secondary.namenode.keytab.file']
-dfs_datanode_keytab_file =  config['configurations']['hdfs-site']['dfs.datanode.keytab.file']
-dfs_namenode_keytab_file =  config['configurations']['hdfs-site']['dfs.namenode.keytab.file']
-smoke_user_keytab = config['configurations']['global']['smokeuser_keytab']
-hdfs_user_keytab = config['configurations']['global']['hdfs_user_keytab']
-falcon_user = config['configurations']['global']['falcon_user']
-
-dfs_datanode_kerberos_principal = config['configurations']['hdfs-site']['dfs.datanode.kerberos.principal']
-dfs_journalnode_kerberos_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.principal']
-dfs_secondary_namenode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.internal.spnego.principal']
-dfs_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.namenode.kerberos.principal']
-dfs_web_authentication_kerberos_principal = config['configurations']['hdfs-site']['dfs.web.authentication.kerberos.principal']
-dfs_secondary_namenode_kerberos_principal = config['configurations']['hdfs-site']['dfs.secondary.namenode.kerberos.principal']
-dfs_journalnode_kerberos_internal_spnego_principal = config['configurations']['hdfs-site']['dfs.journalnode.kerberos.internal.spnego.principal']
-
-#exclude file
-hdfs_exclude_file = default("/clusterHostInfo/decom_dn_hosts", [])
-exclude_file_path = config['configurations']['hdfs-site']['dfs.hosts.exclude']
-
-kinit_path_local = get_kinit_path([default("kinit_path_local",None), "/usr/bin", "/usr/kerberos/bin", "/usr/sbin"])
-#hosts
-hostname = config["hostname"]
-rm_host = default("/clusterHostInfo/rm_host", [])
-slave_hosts = default("/clusterHostInfo/slave_hosts", [])
-hagios_server_hosts = default("/clusterHostInfo/nagios_server_host", [])
-oozie_servers = default("/clusterHostInfo/oozie_server", [])
-hcat_server_hosts = default("/clusterHostInfo/webhcat_server_host", [])
-hive_server_host =  default("/clusterHostInfo/hive_server_host", [])
-hbase_master_hosts = default("/clusterHostInfo/hbase_master_hosts", [])
-hs_host = default("/clusterHostInfo/hs_host", [])
-jtnode_host = default("/clusterHostInfo/jtnode_host", [])
-namenode_host = default("/clusterHostInfo/namenode_host", [])
-nm_host = default("/clusterHostInfo/nm_host", [])
-ganglia_server_hosts = default("/clusterHostInfo/ganglia_server_host", [])
-journalnode_hosts = default("/clusterHostInfo/journalnode_hosts", [])
-zkfc_hosts = default("/clusterHostInfo/zkfc_hosts", [])
-falcon_host = default("/clusterHostInfo/falcon_server_hosts", [])
-
-has_ganglia_server = not len(ganglia_server_hosts) == 0
-has_namenodes = not len(namenode_host) == 0
-has_jobtracker = not len(jtnode_host) == 0
-has_resourcemanager = not len(rm_host) == 0
-has_histroryserver = not len(hs_host) == 0
-has_hbase_masters = not len(hbase_master_hosts) == 0
-has_slaves = not len(slave_hosts) == 0
-has_nagios = not len(hagios_server_hosts) == 0
-has_oozie_server = not len(oozie_servers)  == 0
-has_hcat_server_host = not len(hcat_server_hosts)  == 0
-has_hive_server_host = not len(hive_server_host)  == 0
-has_journalnode_hosts = not len(journalnode_hosts)  == 0
-has_zkfc_hosts = not len(zkfc_hosts)  == 0
-has_falcon_host = not len(falcon_host)  == 0
-
-
-is_namenode_master = hostname in namenode_host
-is_jtnode_master = hostname in jtnode_host
-is_rmnode_master = hostname in rm_host
-is_hsnode_master = hostname in hs_host
-is_hbase_master = hostname in hbase_master_hosts
-is_slave = hostname in slave_hosts
-
-if has_ganglia_server:
-  ganglia_server_host = ganglia_server_hosts[0]
-
-#users and groups
-yarn_user = config['configurations']['global']['yarn_user']
-hbase_user = config['configurations']['global']['hbase_user']
-nagios_user = config['configurations']['global']['nagios_user']
-oozie_user = config['configurations']['global']['oozie_user']
-webhcat_user = config['configurations']['global']['hcat_user']
-hcat_user = config['configurations']['global']['hcat_user']
-hive_user = config['configurations']['global']['hive_user']
-smoke_user =  config['configurations']['global']['smokeuser']
-mapred_user = config['configurations']['global']['mapred_user']
-hdfs_user = status_params.hdfs_user
-
-user_group = config['configurations']['global']['user_group']
-proxyuser_group =  config['configurations']['global']['proxyuser_group']
-nagios_group = config['configurations']['global']['nagios_group']
-smoke_user_group = "users"
-
-#hadoop params
-hadoop_conf_dir = "/etc/hadoop/conf"
-hadoop_pid_dir_prefix = status_params.hadoop_pid_dir_prefix
-hadoop_bin = "/usr/lib/hadoop/sbin"
-
-hdfs_log_dir_prefix = config['configurations']['global']['hdfs_log_dir_prefix']
-
-dfs_domain_socket_path = config['configurations']['hdfs-site']['dfs.domain.socket.path']
-dfs_domain_socket_dir = os.path.dirname(dfs_domain_socket_path)
-
-hadoop_libexec_dir = "/usr/lib/hadoop/libexec"
-
-jn_edits_dir = config['configurations']['hdfs-site']['dfs.journalnode.edits.dir']#"/grid/0/hdfs/journal"
-
-# if stack_version[0] == "2":
-dfs_name_dir = config['configurations']['hdfs-site']['dfs.namenode.name.dir']
-# else:
-#   dfs_name_dir = default("/configurations/hdfs-site/dfs.name.dir","/tmp/hadoop-hdfs/dfs/name")
-
-namenode_dirs_created_stub_dir = format("{hdfs_log_dir_prefix}/{hdfs_user}")
-namenode_dirs_stub_filename = "namenode_dirs_created"
-
-hbase_hdfs_root_dir = config['configurations']['hbase-site']['hbase.rootdir']#","/apps/hbase/data")
-hbase_staging_dir = "/apps/hbase/staging"
-hive_apps_whs_dir = config['configurations']['hive-site']["hive.metastore.warehouse.dir"] #, "/apps/hive/warehouse")
-webhcat_apps_dir = "/apps/webhcat"
-yarn_log_aggregation_enabled = config['configurations']['yarn-site']['yarn.log-aggregation-enable']#","true")
-yarn_nm_app_log_dir =  config['configurations']['yarn-site']['yarn.nodemanager.remote-app-log-dir']#","/app-logs")
-mapreduce_jobhistory_intermediate_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.intermediate-done-dir']#","/app-logs")
-mapreduce_jobhistory_done_dir = config['configurations']['mapred-site']['mapreduce.jobhistory.done-dir']#","/mr-history/done")
-
-if has_oozie_server:
-  oozie_hdfs_user_dir = format("/user/{oozie_user}")
-  oozie_hdfs_user_mode = 775
-if has_hcat_server_host:
-  hcat_hdfs_user_dir = format("/user/{hcat_user}")
-  hcat_hdfs_user_mode = 755
-  webhcat_hdfs_user_dir = format("/user/{webhcat_user}")
-  webhcat_hdfs_user_mode = 755
-if has_hive_server_host:
-  hive_hdfs_user_dir = format("/user/{hive_user}")
-  hive_hdfs_user_mode = 700
-smoke_hdfs_user_dir = format("/user/{smoke_user}")
-smoke_hdfs_user_mode = 770
-
-namenode_formatted_mark_dir = format("{hadoop_pid_dir_prefix}/hdfs/namenode/formatted/")
-
-# if stack_version[0] == "2":
-fs_checkpoint_dir = config['configurations']['hdfs-site']['dfs.namenode.checkpoint.dir'] #","/tmp/hadoop-hdfs/dfs/namesecondary")
-# else:
-#   fs_checkpoint_dir = default("/configurations/core-site/fs.checkpoint.dir","/tmp/hadoop-hdfs/dfs/namesecondary")
-
-# if stack_version[0] == "2":
-dfs_data_dir = config['configurations']['hdfs-site']['dfs.datanode.data.dir']#,"/tmp/hadoop-hdfs/dfs/data")
-# else:
-#   dfs_data_dir = default('/configurations/hdfs-site/dfs.data.dir',"/tmp/hadoop-hdfs/dfs/data")
-
-# HDFS High Availability properties
-dfs_ha_enabled = False
-dfs_ha_nameservices = default("/configurations/hdfs-site/dfs.nameservices", None)
-dfs_ha_namenode_ids = default(format("/configurations/hdfs-site/dfs.ha.namenodes.{dfs_ha_nameservices}"), None)
-if dfs_ha_namenode_ids:
-  dfs_ha_namemodes_ids_list = dfs_ha_namenode_ids.split(",")
-  dfs_ha_namenode_ids_array_len = len(dfs_ha_namemodes_ids_list)
-  if dfs_ha_namenode_ids_array_len > 1:
-    dfs_ha_enabled = True
-namenode_id = None
-if dfs_ha_enabled:
-  for nn_id in dfs_ha_namemodes_ids_list:
-    nn_host = config['configurations']['hdfs-site'][format('dfs.namenode.rpc-address.{dfs_ha_nameservices}.{nn_id}')]
-    if hostname in nn_host:
-      namenode_id = nn_id
-
-journalnode_address = default('/configurations/hdfs-site/dfs.journalnode.http-address', None)
-if journalnode_address:
-  journalnode_port = journalnode_address.split(":")[1]
-
-falcon_store_uri = default('configurations/global/falcon_store_uri', None)
\ No newline at end of file
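
Most assignments in the removed params.py are plain dictionary lookups against the command JSON the agent hands to the script; default() is the variant that tolerates missing keys. A standalone sketch of that lookup behaviour with a hypothetical command payload (the real default()/format() come from resource_management):

command_json = {
  "clusterHostInfo": {
    "namenode_host": ["nn1.example.com"],
    "journalnode_hosts": ["jn1.example.com", "jn2.example.com"],
  },
  "configurations": {
    "hdfs-site": {"dfs.namenode.name.dir": "/hadoop/hdfs/namenode"},
  },
}

def default(path, fallback):
  # "/clusterHostInfo/namenode_host" walks nested dicts, falling back if absent.
  node = command_json
  for key in path.strip("/").split("/"):
    if not isinstance(node, dict) or key not in node:
      return fallback
    node = node[key]
  return node

namenode_host = default("/clusterHostInfo/namenode_host", [])
has_namenodes = not len(namenode_host) == 0
dfs_name_dir = command_json['configurations']['hdfs-site']['dfs.namenode.name.dir']
print("%s %s %s" % (namenode_host, has_namenodes, dfs_name_dir))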

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/service_check.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/service_check.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/service_check.py
deleted file mode 100644
index d27b13a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/service_check.py
+++ /dev/null
@@ -1,107 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-class HdfsServiceCheck(Script):
-  def service_check(self, env):
-    import params
-
-    env.set_params(params)
-    unique = get_unique_id_and_date()
-    dir = '/tmp'
-    tmp_file = format("{dir}/{unique}")
-
-    safemode_command = "dfsadmin -safemode get | grep OFF"
-
-    create_dir_cmd = format("fs -mkdir {dir} ; hadoop fs -chmod -R 777 {dir}")
-    test_dir_exists = format("hadoop fs -test -e {dir}")
-    cleanup_cmd = format("fs -rm {tmp_file}")
-    #cleanup put below to handle retries; if retrying there will be a stale file
-    #that needs cleanup; exit code is fn of second command
-    create_file_cmd = format(
-      "{cleanup_cmd}; hadoop fs -put /etc/passwd {tmp_file}")
-    test_cmd = format("fs -test -e {tmp_file}")
-    if params.security_enabled:
-      Execute(format(
-        "su - {smoke_user} -c '{kinit_path_local} -kt {smoke_user_keytab} "
-        "{smoke_user}'"))
-    ExecuteHadoop(safemode_command,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_dir_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  not_if=test_dir_exists,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(create_file_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    ExecuteHadoop(test_cmd,
-                  user=params.smoke_user,
-                  logoutput=True,
-                  conf_dir=params.hadoop_conf_dir,
-                  try_sleep=3,
-                  tries=5
-    )
-    if params.has_journalnode_hosts:
-      journalnode_port = params.journalnode_port
-      smoke_test_user = params.smoke_user
-      checkWebUIFileName = "checkWebUI.py"
-      checkWebUIFilePath = format("/tmp/{checkWebUIFileName}")
-      comma_sep_jn_hosts = ",".join(params.journalnode_hosts)
-      checkWebUICmd = format(
-        "su - {smoke_test_user} -c 'python {checkWebUIFilePath} -m "
-        "{comma_sep_jn_hosts} -p {journalnode_port}'")
-      File(checkWebUIFilePath,
-           content=StaticFile(checkWebUIFileName))
-
-      Execute(checkWebUICmd,
-              logoutput=True,
-              try_sleep=3,
-              tries=5
-      )
-
-    if params.is_namenode_master:
-      if params.has_zkfc_hosts:
-        pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-        pid_file = format("{pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
-        check_zkfc_process_cmd = format(
-          "ls {pid_file} >/dev/null 2>&1 && ps `cat {pid_file}` >/dev/null 2>&1")
-        Execute(check_zkfc_process_cmd,
-                logoutput=True,
-                try_sleep=3,
-                tries=5
-        )
-
-
-if __name__ == "__main__":
-  HdfsServiceCheck().execute()
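
For reference, the removed HDFS service check issues a short sequence of hadoop commands as the smoke user; ExecuteHadoop prefixes each with "hadoop --config <conf_dir>". A standalone sketch that just prints that sequence (values hypothetical):

conf_dir = "/etc/hadoop/conf"
tmp_file = "/tmp/1a2b3c_2014-01-18"   # stand-in for get_unique_id_and_date()

steps = [
  "dfsadmin -safemode get | grep OFF",                    # NameNode out of safe mode
  "fs -mkdir /tmp ; hadoop fs -chmod -R 777 /tmp",        # only if /tmp does not exist yet
  "fs -rm %s; hadoop fs -put /etc/passwd %s" % (tmp_file, tmp_file),
  "fs -test -e %s" % tmp_file,                            # the uploaded file is visible
]
for step in steps:
  print("hadoop --config %s %s" % (conf_dir, step))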

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/snamenode.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/snamenode.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/snamenode.py
deleted file mode 100644
index 8f682ec..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/snamenode.py
+++ /dev/null
@@ -1,64 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from hdfs_snamenode import snamenode
-
-
-class SNameNode(Script):
-  def install(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.install_packages(env)
-
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-
-    self.config(env)
-    snamenode(action="start")
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="stop")
-
-  def config(self, env):
-    import params
-
-    env.set_params(params)
-
-    snamenode(action="configure")
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.snamenode_pid_file)
-
-
-if __name__ == "__main__":
-  SNameNode().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/status_params.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/status_params.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/status_params.py
deleted file mode 100644
index 4097373..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/status_params.py
+++ /dev/null
@@ -1,31 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-config = Script.get_config()
-
-hadoop_pid_dir_prefix = config['configurations']['global']['hadoop_pid_dir_prefix']
-hdfs_user = config['configurations']['global']['hdfs_user']
-hdp_pid_dir = format("{hadoop_pid_dir_prefix}/{hdfs_user}")
-datanode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-datanode.pid")
-namenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-namenode.pid")
-snamenode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-secondarynamenode.pid")
-journalnode_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-journalnode.pid")
-zkfc_pid_file = format("{hdp_pid_dir}/hadoop-{hdfs_user}-zkfc.pid")
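
A worked example of what the removed status_params.py format() calls expand to, assuming typical values hadoop_pid_dir_prefix=/var/run/hadoop and hdfs_user=hdfs (both hypothetical here):

hadoop_pid_dir_prefix = "/var/run/hadoop"
hdfs_user = "hdfs"
hdp_pid_dir = "%s/%s" % (hadoop_pid_dir_prefix, hdfs_user)

for daemon in ["datanode", "namenode", "secondarynamenode", "journalnode", "zkfc"]:
  # e.g. /var/run/hadoop/hdfs/hadoop-hdfs-namenode.pid
  print("%s/hadoop-%s-%s.pid" % (hdp_pid_dir, hdfs_user, daemon))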

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/utils.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/utils.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/utils.py
deleted file mode 100644
index 225cd2e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/utils.py
+++ /dev/null
@@ -1,138 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-
-
-def service(action=None, name=None, user=None, create_pid_dir=False,
-            create_log_dir=False, keytab=None, principal=None):
-  import params
-
-  kinit_cmd = "true"
-  pid_dir = format("{hadoop_pid_dir_prefix}/{user}")
-  pid_file = format("{pid_dir}/hadoop-{user}-{name}.pid")
-  log_dir = format("{hdfs_log_dir_prefix}/{user}")
-  hadoop_daemon = format(
-    "export HADOOP_LIBEXEC_DIR={hadoop_libexec_dir} && "
-    "{hadoop_bin}/hadoop-daemon.sh")
-  cmd = format("{hadoop_daemon} --config {hadoop_conf_dir}")
-
-  if create_pid_dir:
-    Directory(pid_dir,
-              owner=user,
-              recursive=True)
-  if create_log_dir:
-    Directory(log_dir,
-              owner=user,
-              recursive=True)
-
-  if params.security_enabled:
-    principal_replaced = principal.replace("_HOST", params.hostname)
-    kinit_cmd = format("kinit -kt {keytab} {principal_replaced}")
-
-    if name == "datanode":
-      user = "root"
-      pid_file = format(
-        "{hadoop_pid_dir_prefix}/{hdfs_user}/hadoop-{hdfs_user}-{name}.pid")
-
-  daemon_cmd = format("{cmd} {action} {name}")
-
-  service_is_up = format(
-    "ls {pid_file} >/dev/null 2>&1 &&"
-    " ps `cat {pid_file}` >/dev/null 2>&1") if action == "start" else None
-
-  Execute(kinit_cmd)
-  Execute(daemon_cmd,
-          user = user,
-          not_if=service_is_up
-  )
-  if action == "stop":
-    File(pid_file,
-         action="delete",
-         ignore_failures=True
-    )
-
-
-def hdfs_directory(name=None, owner=None, group=None,
-                   mode=None, recursive_chown=False, recursive_chmod=False):
-  import params
-
-  dir_exists = format("hadoop fs -ls {name} >/dev/null 2>&1")
-  namenode_safe_mode_off = "hadoop dfsadmin -safemode get|grep 'Safe mode is OFF'"
-
-  stub_dir = params.namenode_dirs_created_stub_dir
-  stub_filename = params.namenode_dirs_stub_filename
-  dir_absent_in_stub = format(
-    "grep -q '^{name}$' {stub_dir}/{stub_filename} > /dev/null 2>&1; test $? -ne 0")
-  record_dir_in_stub = format("echo '{name}' >> {stub_dir}/{stub_filename}")
-  tries = 3
-  try_sleep = 10
-  dfs_check_nn_status_cmd = "true"
-
-  if params.dfs_ha_enabled:
-    namenode_id = params.namenode_id
-    dfs_check_nn_status_cmd = format(
-      "hdfs haadmin -getServiceState {namenode_id} | grep active > /dev/null")
-
-  #if params.stack_version[0] == "2":
-  mkdir_cmd = format("fs -mkdir -p {name}")
-  #else:
-  #  mkdir_cmd = format("fs -mkdir {name}")
-
-  if params.security_enabled:
-    Execute(format("kinit -kt {hdfs_user_keytab} {hdfs_user}"),
-            user = params.hdfs_user)
-  ExecuteHadoop(mkdir_cmd,
-                try_sleep=try_sleep,
-                tries=tries,
-                not_if=format(
-                  "! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "{dir_exists} && ! {namenode_safe_mode_off}"),
-                only_if=format(
-                  "{dir_absent_in_stub} && {dfs_check_nn_status_cmd} && "
-                  "! {dir_exists}"),
-                conf_dir=params.hadoop_conf_dir,
-                user=params.hdfs_user
-  )
-  Execute(record_dir_in_stub,
-          user=params.hdfs_user,
-          only_if=format("{dir_absent_in_stub}")
-  )
-
-  recursive = "-R" if recursive_chown else ""
-  perm_cmds = []
-
-  if owner:
-    chown = owner
-    if group:
-      chown = format("{owner}:{group}")
-    perm_cmds.append(format("fs -chown {recursive} {chown} {name}"))
-  if mode:
-    perm_cmds.append(format("fs -chmod {recursive} {mode} {name}"))
-  for cmd in perm_cmds:
-    ExecuteHadoop(cmd,
-                  user=params.hdfs_user,
-                  only_if=format("! {dir_absent_in_stub} && {dfs_check_nn_status_cmd} && {namenode_safe_mode_off} && {dir_exists}"),
-                  try_sleep=try_sleep,
-                  tries=tries,
-                  conf_dir=params.hadoop_conf_dir
-    )
-
-
-
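
The removed hdfs_directory() helper above keeps a local stub file listing every HDFS directory it has already created, so re-runs skip the mkdir/chown/chmod round trips. A standalone sketch of that idempotency guard (plain Python, hypothetical stub path; the real code shells out through ExecuteHadoop):

import os

stub_file = "/tmp/namenode_dirs_created"

def already_recorded(name):
  if not os.path.exists(stub_file):
    return False
  with open(stub_file) as f:
    return name in (line.strip() for line in f)

def record(name):
  with open(stub_file, "a") as f:
    f.write(name + "\n")

def hdfs_directory(name):
  if already_recorded(name):
    return                                   # nothing to do on a re-run
  print("hadoop fs -mkdir -p " + name)       # stand-in for ExecuteHadoop
  record(name)

hdfs_directory("/user/ambari-qa")
hdfs_directory("/user/ambari-qa")            # second call is a no-op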

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/zkfc_slave.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/zkfc_slave.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/zkfc_slave.py
deleted file mode 100644
index 1f9ba65..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/scripts/zkfc_slave.py
+++ /dev/null
@@ -1,62 +0,0 @@
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-from utils import service
-
-
-class ZkfcSlave(Script):
-  def install(self, env):
-    import params
-
-    self.install_packages(env)
-    env.set_params(params)
-
-  def start(self, env):
-    import params
-
-    env.set_params(params)
-    self.config(env)
-    service(
-      action="start", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def stop(self, env):
-    import params
-
-    env.set_params(params)
-    service(
-      action="stop", name="zkfc", user=params.hdfs_user, create_pid_dir=True,
-      create_log_dir=True
-    )
-
-  def config(self, env):
-    pass
-
-  def status(self, env):
-    import status_params
-
-    env.set_params(status_params)
-
-    check_process_status(status_params.zkfc_pid_file)
-
-
-if __name__ == "__main__":
-  ZkfcSlave().execute()

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2 b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
deleted file mode 100644
index c3af46e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HDFS/package/templates/exclude_hosts_list.j2
+++ /dev/null
@@ -1,3 +0,0 @@
-{% for host in hdfs_exclude_file %}
-{{host}}
-{% endfor %}
\ No newline at end of file
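
The removed template is a three-line Jinja2 loop; rendering it standalone (requires the jinja2 package; host names hypothetical, and exact blank-line handling depends on the trim settings Ambari's Template resource applies) looks roughly like this:

from jinja2 import Template

source = (
  "{% for host in hdfs_exclude_file %}\n"
  "{{host}}\n"
  "{% endfor %}"
)
rendered = Template(source).render(
  hdfs_exclude_file=["worker-3.example.com", "worker-7.example.com"])
print(rendered)   # hosts to exclude, each on its own line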

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/configuration/hive-site.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/configuration/hive-site.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/configuration/hive-site.xml
deleted file mode 100644
index b3ed5f5..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/configuration/hive-site.xml
+++ /dev/null
@@ -1,267 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-<!--
-Licensed to the Apache Software Foundation (ASF) under one or more
-contributor license agreements. See the NOTICE file distributed with
-this work for additional information regarding copyright ownership.
-The ASF licenses this file to You under the Apache License, Version 2.0
-(the "License"); you may not use this file except in compliance with
-the License. You may obtain a copy of the License at
-
-http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
--->
-
-<configuration>
-
-  <property>
-    <name>ambari.hive.db.schema.name</name>
-    <value>hive</value>
-    <description>Database name used as the Hive Metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionURL</name>
-    <value>jdbc</value>
-    <description>JDBC connect string for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionDriverName</name>
-    <value>com.mysql.jdbc.Driver</value>
-    <description>Driver class name for a JDBC metastore</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionUserName</name>
-    <value>hive</value>
-    <description>username to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>javax.jdo.option.ConnectionPassword</name>
-    <value> </value>
-    <description>password to use against metastore database</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.warehouse.dir</name>
-    <value>/apps/hive/warehouse</value>
-    <description>location of default database for the warehouse</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.sasl.enabled</name>
-    <value></value>
-    <description>If true, the metastore thrift interface will be secured with SASL.
-     Clients must authenticate with Kerberos.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.keytab.file</name>
-    <value></value>
-    <description>The path to the Kerberos Keytab file containing the metastore
-     thrift server's service principal.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.kerberos.principal</name>
-    <value></value>
-    <description>The service principal for the metastore thrift server. The special
-    string _HOST will be replaced automatically with the correct host name.</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.cache.pinobjtypes</name>
-    <value>Table,Database,Type,FieldSchema,Order</value>
-    <description>List of comma separated metastore object types that should be pinned in the cache</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.uris</name>
-    <value>thrift://localhost:9083</value>
-    <description>URI for client to contact metastore server</description>
-  </property>
-
-  <property>
-    <name>hive.semantic.analyzer.factory.impl</name>
-    <value>org.apache.hivealog.cli.HCatSemanticAnalyzerFactory</value>
-    <description>controls which SemanticAnalyzerFactory implementation class is used by the CLI</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.client.socket.timeout</name>
-    <value>60</value>
-    <description>MetaStore Client socket timeout in seconds</description>
-  </property>
-
-  <property>
-    <name>hive.metastore.execute.setugi</name>
-    <value>true</value>
-    <description>In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using the client's reported user and group permissions. Note that this property must be set on both the client and server sides. Further note that it is best effort: if the client sets it to true and the server sets it to false, the client setting will be ignored.</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.enabled</name>
-    <value>false</value>
-    <description>enable or disable the hive client authorization</description>
-  </property>
-
-  <property>
-    <name>hive.security.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>the hive client authorization manager class name.
-    The user defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.metastore.authorization.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.authorization.StorageBasedAuthorizationProvider</value>
-    <description>The authorization manager class name to be used in the metastore for authorization. The user-defined authorization class should implement interface org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.security.authenticator.manager</name>
-    <value>org.apache.hadoop.hive.ql.security.ProxyUserAuthenticator</value>
-    <description>Hive client authenticator manager class name. The user-defined authenticator class should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider.  </description>
-  </property>
-
-  <property>
-    <name>hive.server2.enable.doAs</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.hdfs.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>fs.file.impl.disable.cache</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.enforce.bucketing</name>
-    <value>true</value>
-    <description>Whether bucketing is enforced. If true, while inserting into the table, bucketing is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.enforce.sorting</name>
-    <value>true</value>
-    <description>Whether sorting is enforced. If true, while inserting into the table, sorting is enforced.</description>
-  </property>
-
-  <property>
-    <name>hive.map.aggr</name>
-    <value>true</value>
-    <description>Whether to use map-side aggregation in Hive Group By queries.</description>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.bucketmapjoin.sortedmerge</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.mapred.reduce.tasks.speculative.execution</name>
-    <value>false</value>
-    <description>Whether speculative execution for reducers should be turned on.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common
-      join into a map join based on the input file size.</description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join</name>
-    <value>true</value>
-    <description>Whether the join will be automatically converted to a sort-merge join if the joined tables pass
-      the criteria for a sort-merge join.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.sortmerge.join.noconditionaltask</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask</name>
-    <value>true</value>
-    <description>Whether Hive enables the optimization of converting a common join into a map join based on the input file
-      size. If this parameter is on, and the sum of the sizes of n-1 of the tables/partitions in an n-way join is smaller than the
-      specified size, the join is directly converted to a map join (there is no conditional task).
-    </description>
-  </property>
-
-  <property>
-    <name>hive.auto.convert.join.noconditionaltask.size</name>
-    <value>1000000000</value>
-    <description>If hive.auto.convert.join.noconditionaltask is off, this parameter does not take effect. However, if it
-      is on, and the sum of the sizes of n-1 of the tables/partitions in an n-way join is smaller than this size, the join is directly
-      converted to a map join (there is no conditional task). The default is 10MB.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication.min.reducer</name>
-    <value>1</value>
-    <description>Reduce deduplication merges two reduce sinks by moving the key/parts/reducer-num of the child reduce sink to the parent.
-      That means if the reducer-num of the child reduce sink is fixed (order by or forced bucketing) and small, it can produce a very slow, single-reducer MR job.
-      The optimization is disabled if the number of reducers is less than the specified value.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.optimize.mapjoin.mapreduce</name>
-    <value>true</value>
-    <description>If hive.auto.convert.join is off, this parameter does not take
-      effect. If it is on, and there are map-join jobs followed by a map-reduce
-      job (for example a group by), each map-only job is merged with the following
-      map-reduce job.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.mapjoin.bucket.cache.size</name>
-    <value>10000</value>
-    <description>
-      Size per reducer.The default is 1G, i.e if the input size is 10G, it
-      will use 10 reducers.
-    </description>
-  </property>
-
-  <property>
-    <name>hive.vectorized.execution.enabled</name>
-    <value>false</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.reducededuplication</name>
-    <value>true</value>
-  </property>
-
-  <property>
-    <name>hive.optimize.index.filter</name>
-    <value>true</value>
-    <description>
-    Whether to enable automatic use of indexes
-    </description>
-  </property>
-
-</configuration>
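
The removed hive-site.xml above follows the standard Ambari configuration layout: a flat list of <property> elements with <name>, <value> and an optional <description>. A standalone sketch of reading such a file into a dict (file path hypothetical):

import xml.etree.ElementTree as ET

def load_properties(path):
  props = {}
  for prop in ET.parse(path).getroot().findall("property"):
    props[prop.findtext("name")] = prop.findtext("value", default="")
  return props

# Example (uncomment with a real file on disk):
# props = load_properties("hive-site.xml")
# print(props.get("hive.metastore.uris"))   # e.g. thrift://localhost:9083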

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/metainfo.xml
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/metainfo.xml b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/metainfo.xml
deleted file mode 100644
index 2384fe9..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/metainfo.xml
+++ /dev/null
@@ -1,156 +0,0 @@
-<?xml version="1.0"?>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<metainfo>
-  <schemaVersion>2.0</schemaVersion>
-  <services>
-    <service>
-      <name>HIVE</name>
-      <comment>Data warehouse system for ad-hoc queries &amp; analysis of large datasets and table &amp; storage management service</comment>
-      <version>0.12.0.2.0.6.0</version>
-      <components>
-
-        <component>
-          <name>HIVE_METASTORE</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hive_metastore.py</script>
-            <scriptType>PYTHON</scriptType>
-            <timeout>600</timeout>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/hive_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>MYSQL_SERVER</name>
-          <category>MASTER</category>
-          <commandScript>
-            <script>scripts/mysql_server.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-
-        <component>
-          <name>HIVE_CLIENT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hive_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hive</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql-connector-java</name>
-            </package>
-            <package>
-              <type>rpm</type>
-              <name>mysql</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos6</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>centos5</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-server</name>
-            </package>
-          </packages>
-        </osSpecific>
-        <osSpecific>
-          <osType>suse</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>mysql-client</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-
-      <configuration-dependencies>
-        <config-type>hive-site</config-type>
-      </configuration-dependencies>
-    </service>
-
-    <service>
-      <name>HCATALOG</name>
-      <comment>This is comment for HCATALOG service</comment>
-      <version>0.12.0.2.0.6.0</version>
-      <components>
-        <component>
-          <name>HCAT</name>
-          <category>CLIENT</category>
-          <commandScript>
-            <script>scripts/hcat_client.py</script>
-            <scriptType>PYTHON</scriptType>
-          </commandScript>
-        </component>
-      </components>
-      <osSpecifics>
-        <osSpecific>
-          <osType>any</osType>
-          <packages>
-            <package>
-              <type>rpm</type>
-              <name>hcatalog</name>
-            </package>
-          </packages>
-        </osSpecific>
-      </osSpecifics>
-      <commandScript>
-        <script>scripts/service_check.py</script>
-        <scriptType>PYTHON</scriptType>
-        <timeout>300</timeout>
-      </commandScript>
-    </service>
-
-  </services>
-</metainfo>
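
The removed metainfo.xml wires each component to the command script the agent should run. A standalone sketch that lists those mappings with ElementTree (file path hypothetical; the real parsing happens inside the Ambari server):

import xml.etree.ElementTree as ET

def list_components(path):
  for service in ET.parse(path).getroot().iter("service"):
    for component in service.iter("component"):
      print("%s %s -> %s" % (service.findtext("name"),
                             component.findtext("name"),
                             component.findtext("commandScript/script")))

# list_components("metainfo.xml")
# e.g. HIVE HIVE_SERVER -> scripts/hive_server.py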

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/addMysqlUser.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/addMysqlUser.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/addMysqlUser.sh
deleted file mode 100644
index 8d31b91..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/addMysqlUser.sh
+++ /dev/null
@@ -1,41 +0,0 @@
-#!/bin/sh
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-mysqldservice=$1
-mysqldbuser=$2
-mysqldbpasswd=$3
-mysqldbhost=$4
-myhostname=$(hostname -f)
-
-service $mysqldservice start
-echo "Adding user $mysqldbuser@$mysqldbhost and $mysqldbuser@localhost"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'$mysqldbhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "CREATE USER '$mysqldbuser'@'localhost' IDENTIFIED BY '$mysqldbpasswd';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$mysqldbhost';"
-mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'localhost';"
-if [ -z "$(mysql -u root -e "select user from mysql.user where user='$mysqldbuser' and host='$myhostname'" | grep "$mysqldbuser")" ]; then
-  echo "Adding user $mysqldbuser@$myhostname";
-  mysql -u root -e "CREATE USER '$mysqldbuser'@'$myhostname' IDENTIFIED BY '$mysqldbpasswd';";
-  mysql -u root -e "GRANT ALL PRIVILEGES ON *.* TO '$mysqldbuser'@'$myhostname';";
-fi
-mysql -u root -e "flush privileges;"
-service $mysqldservice stop

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hcatSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hcatSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hcatSmoke.sh
deleted file mode 100644
index 9e7b33f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hcatSmoke.sh
+++ /dev/null
@@ -1,35 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-
-case "$2" in
-
-prepare)
-  hcat -e "show tables"
-  hcat -e "drop table IF EXISTS ${tablename}"
-  hcat -e "create table ${tablename} ( id INT, name string ) stored as rcfile ;"
-;;
-
-cleanup)
-  hcat -e "drop table IF EXISTS ${tablename}"
-;;
-
-esac

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveSmoke.sh
deleted file mode 100644
index 7e03524..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveSmoke.sh
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-export tablename=$1
-echo "CREATE EXTERNAL TABLE IF NOT EXISTS ${tablename} ( foo INT, bar STRING );" | hive
-echo "DESCRIBE ${tablename};" | hive

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2.sql
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2.sql b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2.sql
deleted file mode 100644
index 99a3865..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2.sql
+++ /dev/null
@@ -1,23 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-CREATE EXTERNAL TABLE IF NOT EXISTS hiveserver2smoke20408 ( foo INT, bar STRING );
-DESCRIBE hiveserver2smoke20408;

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2Smoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2Smoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2Smoke.sh
deleted file mode 100644
index 051a21e..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/hiveserver2Smoke.sh
+++ /dev/null
@@ -1,31 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-
-smokeout=`/usr/lib/hive/bin/beeline -u $1 -n fakeuser -p fakepwd -d org.apache.hive.jdbc.HiveDriver -e '!run $2' 2>&1| awk '{print}'|grep Error`
-
-if [ "x$smokeout" == "x" ]; then
-  echo "Smoke test of hiveserver2 passed"
-  exit 0
-else
-  echo "Smoke test of hiveserver2 wasnt passed"
-  echo $smokeout
-  exit 1
-fi

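The removed hiveserver2Smoke.sh expects a HiveServer2 JDBC URL as $1 and a SQL file as $2, runs the file through beeline with dummy credentials, and reports failure if any Error lines appear in the output. An illustrative invocation (host and port are assumptions) would be roughly:

  sh hiveserver2Smoke.sh jdbc:hive2://localhost:10000 hiveserver2.sql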
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/pigSmoke.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/pigSmoke.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/pigSmoke.sh
deleted file mode 100644
index 2e90ac0..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/pigSmoke.sh
+++ /dev/null
@@ -1,18 +0,0 @@
-# Licensed to the Apache Software Foundation (ASF) under one or more
-# contributor license agreements.  See the NOTICE file distributed with
-# this work for additional information regarding copyright ownership.
-# The ASF licenses this file to You under the Apache License, Version 2.0
-# (the "License"); you may not use this file except in compliance with
-# the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License
-
-A = load 'passwd' using PigStorage(':');
-B = foreach A generate \$0 as id;
-store B into 'pigsmoke.out';

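Despite its .sh extension, the removed pigSmoke.sh is a Pig Latin script: it loads a file named passwd from the user's HDFS directory, projects the first field as id, and stores the result in pigsmoke.out. A rough sketch of how it could be exercised (the copy step and paths are assumptions):

  hadoop fs -put /etc/passwd passwd
  pig pigSmoke.sh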
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startHiveserver2.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startHiveserver2.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startHiveserver2.sh
deleted file mode 100644
index fa90c2f..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startHiveserver2.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 /usr/lib/hive/bin/hiveserver2 -hiveconf hive.metastore.uris=' ' > $1 2> $2 &
-echo $!|cat>$3

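The removed startHiveserver2.sh takes four positional arguments: a stdout log ($1), a stderr log ($2), a file to receive the HiveServer2 PID ($3), and the Hive configuration directory ($4). An illustrative invocation (all paths are assumptions) would be roughly:

  sh startHiveserver2.sh /var/log/hive/hive-server2.out /var/log/hive/hive-server2.err /var/run/hive/hive-server.pid /etc/hive/conf.server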
http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startMetastore.sh
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startMetastore.sh b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startMetastore.sh
deleted file mode 100644
index 9350776..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/files/startMetastore.sh
+++ /dev/null
@@ -1,22 +0,0 @@
-#
-#
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#   http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing,
-# software distributed under the License is distributed on an
-# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
-# KIND, either express or implied.  See the License for the
-# specific language governing permissions and limitations
-# under the License.
-#
-#
-HIVE_CONF_DIR=$4 hive --service metastore > $1 2> $2 &
-echo $!|cat>$3

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/__init__.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/__init__.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/__init__.py
deleted file mode 100644
index 5561e10..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/__init__.py
+++ /dev/null
@@ -1,19 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat.py
deleted file mode 100644
index 2993d3a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat.py
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-from resource_management import *
-import sys
-
-
-def hcat():
-  import params
-
-  Directory(params.hcat_conf_dir,
-            owner=params.hcat_user,
-            group=params.user_group,
-  )
-
-  Directory(params.hcat_pid_dir,
-            owner=params.webhcat_user,
-            recursive=True
-  )
-
-  hcat_TemplateConfig('hcat-env.sh')
-
-
-def hcat_TemplateConfig(name):
-  import params
-
-  TemplateConfig(format("{hcat_conf_dir}/{name}"),
-                 owner=params.hcat_user,
-                 group=params.user_group
-  )

http://git-wip-us.apache.org/repos/asf/ambari/blob/ae534ed3/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_client.py
----------------------------------------------------------------------
diff --git a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_client.py b/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_client.py
deleted file mode 100644
index 8b5921a..0000000
--- a/ambari-server/src/main/resources/stacks/HDP/2.0.8/services/HIVE/package/scripts/hcat_client.py
+++ /dev/null
@@ -1,43 +0,0 @@
-#!/usr/bin/env python
-"""
-Licensed to the Apache Software Foundation (ASF) under one
-or more contributor license agreements.  See the NOTICE file
-distributed with this work for additional information
-regarding copyright ownership.  The ASF licenses this file
-to you under the Apache License, Version 2.0 (the
-"License"); you may not use this file except in compliance
-with the License.  You may obtain a copy of the License at
-
-    http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-
-"""
-
-import sys
-from resource_management import *
-from hcat import hcat
-
-class HCatClient(Script):
-  def install(self, env):
-    self.install_packages(env)
-    self.configure(env)
-
-  def configure(self, env):
-    import params
-
-    env.set_params(params)
-
-    hcat()
-
-
-  def status(self, env):
-    raise ClientComponentHasNoStatus()
-
-
-if __name__ == "__main__":
-  HCatClient().execute()